1 /* SuperH Ethernet device driver 2 * 3 * Copyright (C) 2014 Renesas Electronics Corporation 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 5 * Copyright (C) 2008-2014 Renesas Solutions Corp. 6 * Copyright (C) 2013-2014 Cogent Embedded, Inc. 7 * Copyright (C) 2014 Codethink Limited 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms and conditions of the GNU General Public License, 11 * version 2, as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope it will be useful, but WITHOUT 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * more details. 17 * 18 * The full GNU General Public License is included in this distribution in 19 * the file called "COPYING". 20 */ 21 22 #include <linux/module.h> 23 #include <linux/kernel.h> 24 #include <linux/spinlock.h> 25 #include <linux/interrupt.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/etherdevice.h> 28 #include <linux/delay.h> 29 #include <linux/platform_device.h> 30 #include <linux/mdio-bitbang.h> 31 #include <linux/netdevice.h> 32 #include <linux/of.h> 33 #include <linux/of_device.h> 34 #include <linux/of_irq.h> 35 #include <linux/of_net.h> 36 #include <linux/phy.h> 37 #include <linux/cache.h> 38 #include <linux/io.h> 39 #include <linux/pm_runtime.h> 40 #include <linux/slab.h> 41 #include <linux/ethtool.h> 42 #include <linux/if_vlan.h> 43 #include <linux/clk.h> 44 #include <linux/sh_eth.h> 45 #include <linux/of_mdio.h> 46 47 #include "sh_eth.h" 48 49 #define SH_ETH_DEF_MSG_ENABLE \ 50 (NETIF_MSG_LINK | \ 51 NETIF_MSG_TIMER | \ 52 NETIF_MSG_RX_ERR| \ 53 NETIF_MSG_TX_ERR) 54 55 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { 56 [EDSR] = 0x0000, 57 [EDMR] = 0x0400, 58 [EDTRR] = 0x0408, 59 [EDRRR] = 0x0410, 60 [EESR] = 0x0428, 61 [EESIPR] = 0x0430, 62 [TDLAR] = 0x0010, 63 [TDFAR] = 0x0014, 64 [TDFXR] = 0x0018, 65 [TDFFR] = 0x001c, 66 [RDLAR] = 0x0030, 67 [RDFAR] = 0x0034, 68 [RDFXR] = 0x0038, 69 [RDFFR] = 0x003c, 70 [TRSCER] = 0x0438, 71 [RMFCR] = 0x0440, 72 [TFTR] = 0x0448, 73 [FDR] = 0x0450, 74 [RMCR] = 0x0458, 75 [RPADIR] = 0x0460, 76 [FCFTR] = 0x0468, 77 [CSMR] = 0x04E4, 78 79 [ECMR] = 0x0500, 80 [ECSR] = 0x0510, 81 [ECSIPR] = 0x0518, 82 [PIR] = 0x0520, 83 [PSR] = 0x0528, 84 [PIPR] = 0x052c, 85 [RFLR] = 0x0508, 86 [APR] = 0x0554, 87 [MPR] = 0x0558, 88 [PFTCR] = 0x055c, 89 [PFRCR] = 0x0560, 90 [TPAUSER] = 0x0564, 91 [GECMR] = 0x05b0, 92 [BCULR] = 0x05b4, 93 [MAHR] = 0x05c0, 94 [MALR] = 0x05c8, 95 [TROCR] = 0x0700, 96 [CDCR] = 0x0708, 97 [LCCR] = 0x0710, 98 [CEFCR] = 0x0740, 99 [FRECR] = 0x0748, 100 [TSFRCR] = 0x0750, 101 [TLFRCR] = 0x0758, 102 [RFCR] = 0x0760, 103 [CERCR] = 0x0768, 104 [CEECR] = 0x0770, 105 [MAFCR] = 0x0778, 106 [RMII_MII] = 0x0790, 107 108 [ARSTR] = 0x0000, 109 [TSU_CTRST] = 0x0004, 110 [TSU_FWEN0] = 0x0010, 111 [TSU_FWEN1] = 0x0014, 112 [TSU_FCM] = 0x0018, 113 [TSU_BSYSL0] = 0x0020, 114 [TSU_BSYSL1] = 0x0024, 115 [TSU_PRISL0] = 0x0028, 116 [TSU_PRISL1] = 0x002c, 117 [TSU_FWSL0] = 0x0030, 118 [TSU_FWSL1] = 0x0034, 119 [TSU_FWSLC] = 0x0038, 120 [TSU_QTAG0] = 0x0040, 121 [TSU_QTAG1] = 0x0044, 122 [TSU_FWSR] = 0x0050, 123 [TSU_FWINMK] = 0x0054, 124 [TSU_ADQT0] = 0x0048, 125 [TSU_ADQT1] = 0x004c, 126 [TSU_VTAG0] = 0x0058, 127 [TSU_VTAG1] = 0x005c, 128 [TSU_ADSBSY] = 0x0060, 129 [TSU_TEN] = 0x0064, 130 [TSU_POST1] = 0x0070, 131 [TSU_POST2] = 0x0074, 132 [TSU_POST3] = 0x0078, 133 [TSU_POST4] = 0x007c, 134 [TSU_ADRH0] = 0x0100, 135 [TSU_ADRL0] = 0x0104, 136 [TSU_ADRH31] = 0x01f8, 137 [TSU_ADRL31] = 0x01fc, 138 139 [TXNLCR0] = 0x0080, 140 [TXALCR0] = 0x0084, 141 [RXNLCR0] = 0x0088, 142 [RXALCR0] = 0x008c, 143 [FWNLCR0] = 0x0090, 144 [FWALCR0] = 0x0094, 145 [TXNLCR1] = 0x00a0, 146 [TXALCR1] = 0x00a0, 147 [RXNLCR1] = 0x00a8, 148 [RXALCR1] = 0x00ac, 149 [FWNLCR1] = 0x00b0, 150 [FWALCR1] = 0x00b4, 151 }; 152 153 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { 154 [EDSR] = 0x0000, 155 [EDMR] = 0x0400, 156 [EDTRR] = 0x0408, 157 [EDRRR] = 0x0410, 158 [EESR] = 0x0428, 159 [EESIPR] = 0x0430, 160 [TDLAR] = 0x0010, 161 [TDFAR] = 0x0014, 162 [TDFXR] = 0x0018, 163 [TDFFR] = 0x001c, 164 [RDLAR] = 0x0030, 165 [RDFAR] = 0x0034, 166 [RDFXR] = 0x0038, 167 [RDFFR] = 0x003c, 168 [TRSCER] = 0x0438, 169 [RMFCR] = 0x0440, 170 [TFTR] = 0x0448, 171 [FDR] = 0x0450, 172 [RMCR] = 0x0458, 173 [RPADIR] = 0x0460, 174 [FCFTR] = 0x0468, 175 [CSMR] = 0x04E4, 176 177 [ECMR] = 0x0500, 178 [RFLR] = 0x0508, 179 [ECSR] = 0x0510, 180 [ECSIPR] = 0x0518, 181 [PIR] = 0x0520, 182 [APR] = 0x0554, 183 [MPR] = 0x0558, 184 [PFTCR] = 0x055c, 185 [PFRCR] = 0x0560, 186 [TPAUSER] = 0x0564, 187 [MAHR] = 0x05c0, 188 [MALR] = 0x05c8, 189 [CEFCR] = 0x0740, 190 [FRECR] = 0x0748, 191 [TSFRCR] = 0x0750, 192 [TLFRCR] = 0x0758, 193 [RFCR] = 0x0760, 194 [MAFCR] = 0x0778, 195 196 [ARSTR] = 0x0000, 197 [TSU_CTRST] = 0x0004, 198 [TSU_VTAG0] = 0x0058, 199 [TSU_ADSBSY] = 0x0060, 200 [TSU_TEN] = 0x0064, 201 [TSU_ADRH0] = 0x0100, 202 [TSU_ADRL0] = 0x0104, 203 [TSU_ADRH31] = 0x01f8, 204 [TSU_ADRL31] = 0x01fc, 205 206 [TXNLCR0] = 0x0080, 207 [TXALCR0] = 0x0084, 208 [RXNLCR0] = 0x0088, 209 [RXALCR0] = 0x008C, 210 }; 211 212 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { 213 [ECMR] = 0x0300, 214 [RFLR] = 0x0308, 215 [ECSR] = 0x0310, 216 [ECSIPR] = 0x0318, 217 [PIR] = 0x0320, 218 [PSR] = 0x0328, 219 [RDMLR] = 0x0340, 220 [IPGR] = 0x0350, 221 [APR] = 0x0354, 222 [MPR] = 0x0358, 223 [RFCF] = 0x0360, 224 [TPAUSER] = 0x0364, 225 [TPAUSECR] = 0x0368, 226 [MAHR] = 0x03c0, 227 [MALR] = 0x03c8, 228 [TROCR] = 0x03d0, 229 [CDCR] = 0x03d4, 230 [LCCR] = 0x03d8, 231 [CNDCR] = 0x03dc, 232 [CEFCR] = 0x03e4, 233 [FRECR] = 0x03e8, 234 [TSFRCR] = 0x03ec, 235 [TLFRCR] = 0x03f0, 236 [RFCR] = 0x03f4, 237 [MAFCR] = 0x03f8, 238 239 [EDMR] = 0x0200, 240 [EDTRR] = 0x0208, 241 [EDRRR] = 0x0210, 242 [TDLAR] = 0x0218, 243 [RDLAR] = 0x0220, 244 [EESR] = 0x0228, 245 [EESIPR] = 0x0230, 246 [TRSCER] = 0x0238, 247 [RMFCR] = 0x0240, 248 [TFTR] = 0x0248, 249 [FDR] = 0x0250, 250 [RMCR] = 0x0258, 251 [TFUCR] = 0x0264, 252 [RFOCR] = 0x0268, 253 [RMIIMODE] = 0x026c, 254 [FCFTR] = 0x0270, 255 [TRIMD] = 0x027c, 256 }; 257 258 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { 259 [ECMR] = 0x0100, 260 [RFLR] = 0x0108, 261 [ECSR] = 0x0110, 262 [ECSIPR] = 0x0118, 263 [PIR] = 0x0120, 264 [PSR] = 0x0128, 265 [RDMLR] = 0x0140, 266 [IPGR] = 0x0150, 267 [APR] = 0x0154, 268 [MPR] = 0x0158, 269 [TPAUSER] = 0x0164, 270 [RFCF] = 0x0160, 271 [TPAUSECR] = 0x0168, 272 [BCFRR] = 0x016c, 273 [MAHR] = 0x01c0, 274 [MALR] = 0x01c8, 275 [TROCR] = 0x01d0, 276 [CDCR] = 0x01d4, 277 [LCCR] = 0x01d8, 278 [CNDCR] = 0x01dc, 279 [CEFCR] = 0x01e4, 280 [FRECR] = 0x01e8, 281 [TSFRCR] = 0x01ec, 282 [TLFRCR] = 0x01f0, 283 [RFCR] = 0x01f4, 284 [MAFCR] = 0x01f8, 285 [RTRATE] = 0x01fc, 286 287 [EDMR] = 0x0000, 288 [EDTRR] = 0x0008, 289 [EDRRR] = 0x0010, 290 [TDLAR] = 0x0018, 291 [RDLAR] = 0x0020, 292 [EESR] = 0x0028, 293 [EESIPR] = 0x0030, 294 [TRSCER] = 0x0038, 295 [RMFCR] = 0x0040, 296 [TFTR] = 0x0048, 297 [FDR] = 0x0050, 298 [RMCR] = 0x0058, 299 [TFUCR] = 0x0064, 300 [RFOCR] = 0x0068, 301 [FCFTR] = 0x0070, 302 [RPADIR] = 0x0078, 303 [TRIMD] = 0x007c, 304 [RBWAR] = 0x00c8, 305 [RDFAR] = 0x00cc, 306 [TBRAR] = 0x00d4, 307 [TDFAR] = 0x00d8, 308 }; 309 310 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { 311 [EDMR] = 0x0000, 312 [EDTRR] = 0x0004, 313 [EDRRR] = 0x0008, 314 [TDLAR] = 0x000c, 315 [RDLAR] = 0x0010, 316 [EESR] = 0x0014, 317 [EESIPR] = 0x0018, 318 [TRSCER] = 0x001c, 319 [RMFCR] = 0x0020, 320 [TFTR] = 0x0024, 321 [FDR] = 0x0028, 322 [RMCR] = 0x002c, 323 [EDOCR] = 0x0030, 324 [FCFTR] = 0x0034, 325 [RPADIR] = 0x0038, 326 [TRIMD] = 0x003c, 327 [RBWAR] = 0x0040, 328 [RDFAR] = 0x0044, 329 [TBRAR] = 0x004c, 330 [TDFAR] = 0x0050, 331 332 [ECMR] = 0x0160, 333 [ECSR] = 0x0164, 334 [ECSIPR] = 0x0168, 335 [PIR] = 0x016c, 336 [MAHR] = 0x0170, 337 [MALR] = 0x0174, 338 [RFLR] = 0x0178, 339 [PSR] = 0x017c, 340 [TROCR] = 0x0180, 341 [CDCR] = 0x0184, 342 [LCCR] = 0x0188, 343 [CNDCR] = 0x018c, 344 [CEFCR] = 0x0194, 345 [FRECR] = 0x0198, 346 [TSFRCR] = 0x019c, 347 [TLFRCR] = 0x01a0, 348 [RFCR] = 0x01a4, 349 [MAFCR] = 0x01a8, 350 [IPGR] = 0x01b4, 351 [APR] = 0x01b8, 352 [MPR] = 0x01bc, 353 [TPAUSER] = 0x01c4, 354 [BCFR] = 0x01cc, 355 356 [ARSTR] = 0x0000, 357 [TSU_CTRST] = 0x0004, 358 [TSU_FWEN0] = 0x0010, 359 [TSU_FWEN1] = 0x0014, 360 [TSU_FCM] = 0x0018, 361 [TSU_BSYSL0] = 0x0020, 362 [TSU_BSYSL1] = 0x0024, 363 [TSU_PRISL0] = 0x0028, 364 [TSU_PRISL1] = 0x002c, 365 [TSU_FWSL0] = 0x0030, 366 [TSU_FWSL1] = 0x0034, 367 [TSU_FWSLC] = 0x0038, 368 [TSU_QTAGM0] = 0x0040, 369 [TSU_QTAGM1] = 0x0044, 370 [TSU_ADQT0] = 0x0048, 371 [TSU_ADQT1] = 0x004c, 372 [TSU_FWSR] = 0x0050, 373 [TSU_FWINMK] = 0x0054, 374 [TSU_ADSBSY] = 0x0060, 375 [TSU_TEN] = 0x0064, 376 [TSU_POST1] = 0x0070, 377 [TSU_POST2] = 0x0074, 378 [TSU_POST3] = 0x0078, 379 [TSU_POST4] = 0x007c, 380 381 [TXNLCR0] = 0x0080, 382 [TXALCR0] = 0x0084, 383 [RXNLCR0] = 0x0088, 384 [RXALCR0] = 0x008c, 385 [FWNLCR0] = 0x0090, 386 [FWALCR0] = 0x0094, 387 [TXNLCR1] = 0x00a0, 388 [TXALCR1] = 0x00a0, 389 [RXNLCR1] = 0x00a8, 390 [RXALCR1] = 0x00ac, 391 [FWNLCR1] = 0x00b0, 392 [FWALCR1] = 0x00b4, 393 394 [TSU_ADRH0] = 0x0100, 395 [TSU_ADRL0] = 0x0104, 396 [TSU_ADRL31] = 0x01fc, 397 }; 398 399 static bool sh_eth_is_gether(struct sh_eth_private *mdp) 400 { 401 return mdp->reg_offset == sh_eth_offset_gigabit; 402 } 403 404 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) 405 { 406 return mdp->reg_offset == sh_eth_offset_fast_rz; 407 } 408 409 static void sh_eth_select_mii(struct net_device *ndev) 410 { 411 u32 value = 0x0; 412 struct sh_eth_private *mdp = netdev_priv(ndev); 413 414 switch (mdp->phy_interface) { 415 case PHY_INTERFACE_MODE_GMII: 416 value = 0x2; 417 break; 418 case PHY_INTERFACE_MODE_MII: 419 value = 0x1; 420 break; 421 case PHY_INTERFACE_MODE_RMII: 422 value = 0x0; 423 break; 424 default: 425 netdev_warn(ndev, 426 "PHY interface mode was not setup. Set to MII.\n"); 427 value = 0x1; 428 break; 429 } 430 431 sh_eth_write(ndev, value, RMII_MII); 432 } 433 434 static void sh_eth_set_duplex(struct net_device *ndev) 435 { 436 struct sh_eth_private *mdp = netdev_priv(ndev); 437 438 if (mdp->duplex) /* Full */ 439 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 440 else /* Half */ 441 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 442 } 443 444 /* There is CPU dependent code */ 445 static void sh_eth_set_rate_r8a777x(struct net_device *ndev) 446 { 447 struct sh_eth_private *mdp = netdev_priv(ndev); 448 449 switch (mdp->speed) { 450 case 10: /* 10BASE */ 451 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); 452 break; 453 case 100:/* 100BASE */ 454 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); 455 break; 456 default: 457 break; 458 } 459 } 460 461 /* R8A7778/9 */ 462 static struct sh_eth_cpu_data r8a777x_data = { 463 .set_duplex = sh_eth_set_duplex, 464 .set_rate = sh_eth_set_rate_r8a777x, 465 466 .register_type = SH_ETH_REG_FAST_RCAR, 467 468 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 469 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 470 .eesipr_value = 0x01ff009f, 471 472 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 473 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 474 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 475 EESR_ECI, 476 477 .apr = 1, 478 .mpr = 1, 479 .tpauser = 1, 480 .hw_swap = 1, 481 }; 482 483 /* R8A7790/1 */ 484 static struct sh_eth_cpu_data r8a779x_data = { 485 .set_duplex = sh_eth_set_duplex, 486 .set_rate = sh_eth_set_rate_r8a777x, 487 488 .register_type = SH_ETH_REG_FAST_RCAR, 489 490 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 491 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 492 .eesipr_value = 0x01ff009f, 493 494 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 495 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 496 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 497 EESR_ECI, 498 499 .apr = 1, 500 .mpr = 1, 501 .tpauser = 1, 502 .hw_swap = 1, 503 .rmiimode = 1, 504 .shift_rd0 = 1, 505 }; 506 507 static void sh_eth_set_rate_sh7724(struct net_device *ndev) 508 { 509 struct sh_eth_private *mdp = netdev_priv(ndev); 510 511 switch (mdp->speed) { 512 case 10: /* 10BASE */ 513 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); 514 break; 515 case 100:/* 100BASE */ 516 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); 517 break; 518 default: 519 break; 520 } 521 } 522 523 /* SH7724 */ 524 static struct sh_eth_cpu_data sh7724_data = { 525 .set_duplex = sh_eth_set_duplex, 526 .set_rate = sh_eth_set_rate_sh7724, 527 528 .register_type = SH_ETH_REG_FAST_SH4, 529 530 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 531 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 532 .eesipr_value = 0x01ff009f, 533 534 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 535 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 536 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 537 EESR_ECI, 538 539 .apr = 1, 540 .mpr = 1, 541 .tpauser = 1, 542 .hw_swap = 1, 543 .rpadir = 1, 544 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 545 }; 546 547 static void sh_eth_set_rate_sh7757(struct net_device *ndev) 548 { 549 struct sh_eth_private *mdp = netdev_priv(ndev); 550 551 switch (mdp->speed) { 552 case 10: /* 10BASE */ 553 sh_eth_write(ndev, 0, RTRATE); 554 break; 555 case 100:/* 100BASE */ 556 sh_eth_write(ndev, 1, RTRATE); 557 break; 558 default: 559 break; 560 } 561 } 562 563 /* SH7757 */ 564 static struct sh_eth_cpu_data sh7757_data = { 565 .set_duplex = sh_eth_set_duplex, 566 .set_rate = sh_eth_set_rate_sh7757, 567 568 .register_type = SH_ETH_REG_FAST_SH4, 569 570 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 571 572 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 573 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 574 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 575 EESR_ECI, 576 577 .irq_flags = IRQF_SHARED, 578 .apr = 1, 579 .mpr = 1, 580 .tpauser = 1, 581 .hw_swap = 1, 582 .no_ade = 1, 583 .rpadir = 1, 584 .rpadir_value = 2 << 16, 585 }; 586 587 #define SH_GIGA_ETH_BASE 0xfee00000UL 588 #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 589 #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 590 static void sh_eth_chip_reset_giga(struct net_device *ndev) 591 { 592 int i; 593 unsigned long mahr[2], malr[2]; 594 595 /* save MAHR and MALR */ 596 for (i = 0; i < 2; i++) { 597 malr[i] = ioread32((void *)GIGA_MALR(i)); 598 mahr[i] = ioread32((void *)GIGA_MAHR(i)); 599 } 600 601 /* reset device */ 602 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); 603 mdelay(1); 604 605 /* restore MAHR and MALR */ 606 for (i = 0; i < 2; i++) { 607 iowrite32(malr[i], (void *)GIGA_MALR(i)); 608 iowrite32(mahr[i], (void *)GIGA_MAHR(i)); 609 } 610 } 611 612 static void sh_eth_set_rate_giga(struct net_device *ndev) 613 { 614 struct sh_eth_private *mdp = netdev_priv(ndev); 615 616 switch (mdp->speed) { 617 case 10: /* 10BASE */ 618 sh_eth_write(ndev, 0x00000000, GECMR); 619 break; 620 case 100:/* 100BASE */ 621 sh_eth_write(ndev, 0x00000010, GECMR); 622 break; 623 case 1000: /* 1000BASE */ 624 sh_eth_write(ndev, 0x00000020, GECMR); 625 break; 626 default: 627 break; 628 } 629 } 630 631 /* SH7757(GETHERC) */ 632 static struct sh_eth_cpu_data sh7757_data_giga = { 633 .chip_reset = sh_eth_chip_reset_giga, 634 .set_duplex = sh_eth_set_duplex, 635 .set_rate = sh_eth_set_rate_giga, 636 637 .register_type = SH_ETH_REG_GIGABIT, 638 639 .ecsr_value = ECSR_ICD | ECSR_MPD, 640 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 641 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 642 643 .tx_check = EESR_TC1 | EESR_FTC, 644 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 645 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 646 EESR_TDE | EESR_ECI, 647 .fdr_value = 0x0000072f, 648 649 .irq_flags = IRQF_SHARED, 650 .apr = 1, 651 .mpr = 1, 652 .tpauser = 1, 653 .bculr = 1, 654 .hw_swap = 1, 655 .rpadir = 1, 656 .rpadir_value = 2 << 16, 657 .no_trimd = 1, 658 .no_ade = 1, 659 .tsu = 1, 660 }; 661 662 static void sh_eth_chip_reset(struct net_device *ndev) 663 { 664 struct sh_eth_private *mdp = netdev_priv(ndev); 665 666 /* reset device */ 667 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 668 mdelay(1); 669 } 670 671 static void sh_eth_set_rate_gether(struct net_device *ndev) 672 { 673 struct sh_eth_private *mdp = netdev_priv(ndev); 674 675 switch (mdp->speed) { 676 case 10: /* 10BASE */ 677 sh_eth_write(ndev, GECMR_10, GECMR); 678 break; 679 case 100:/* 100BASE */ 680 sh_eth_write(ndev, GECMR_100, GECMR); 681 break; 682 case 1000: /* 1000BASE */ 683 sh_eth_write(ndev, GECMR_1000, GECMR); 684 break; 685 default: 686 break; 687 } 688 } 689 690 /* SH7734 */ 691 static struct sh_eth_cpu_data sh7734_data = { 692 .chip_reset = sh_eth_chip_reset, 693 .set_duplex = sh_eth_set_duplex, 694 .set_rate = sh_eth_set_rate_gether, 695 696 .register_type = SH_ETH_REG_GIGABIT, 697 698 .ecsr_value = ECSR_ICD | ECSR_MPD, 699 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 700 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 701 702 .tx_check = EESR_TC1 | EESR_FTC, 703 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 704 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 705 EESR_TDE | EESR_ECI, 706 707 .apr = 1, 708 .mpr = 1, 709 .tpauser = 1, 710 .bculr = 1, 711 .hw_swap = 1, 712 .no_trimd = 1, 713 .no_ade = 1, 714 .tsu = 1, 715 .hw_crc = 1, 716 .select_mii = 1, 717 }; 718 719 /* SH7763 */ 720 static struct sh_eth_cpu_data sh7763_data = { 721 .chip_reset = sh_eth_chip_reset, 722 .set_duplex = sh_eth_set_duplex, 723 .set_rate = sh_eth_set_rate_gether, 724 725 .register_type = SH_ETH_REG_GIGABIT, 726 727 .ecsr_value = ECSR_ICD | ECSR_MPD, 728 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 729 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 730 731 .tx_check = EESR_TC1 | EESR_FTC, 732 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 733 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 734 EESR_ECI, 735 736 .apr = 1, 737 .mpr = 1, 738 .tpauser = 1, 739 .bculr = 1, 740 .hw_swap = 1, 741 .no_trimd = 1, 742 .no_ade = 1, 743 .tsu = 1, 744 .irq_flags = IRQF_SHARED, 745 }; 746 747 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) 748 { 749 struct sh_eth_private *mdp = netdev_priv(ndev); 750 751 /* reset device */ 752 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 753 mdelay(1); 754 755 sh_eth_select_mii(ndev); 756 } 757 758 /* R8A7740 */ 759 static struct sh_eth_cpu_data r8a7740_data = { 760 .chip_reset = sh_eth_chip_reset_r8a7740, 761 .set_duplex = sh_eth_set_duplex, 762 .set_rate = sh_eth_set_rate_gether, 763 764 .register_type = SH_ETH_REG_GIGABIT, 765 766 .ecsr_value = ECSR_ICD | ECSR_MPD, 767 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 768 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 769 770 .tx_check = EESR_TC1 | EESR_FTC, 771 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 772 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 773 EESR_TDE | EESR_ECI, 774 .fdr_value = 0x0000070f, 775 776 .apr = 1, 777 .mpr = 1, 778 .tpauser = 1, 779 .bculr = 1, 780 .hw_swap = 1, 781 .rpadir = 1, 782 .rpadir_value = 2 << 16, 783 .no_trimd = 1, 784 .no_ade = 1, 785 .tsu = 1, 786 .select_mii = 1, 787 .shift_rd0 = 1, 788 }; 789 790 /* R7S72100 */ 791 static struct sh_eth_cpu_data r7s72100_data = { 792 .chip_reset = sh_eth_chip_reset, 793 .set_duplex = sh_eth_set_duplex, 794 795 .register_type = SH_ETH_REG_FAST_RZ, 796 797 .ecsr_value = ECSR_ICD, 798 .ecsipr_value = ECSIPR_ICDIP, 799 .eesipr_value = 0xff7f009f, 800 801 .tx_check = EESR_TC1 | EESR_FTC, 802 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 803 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 804 EESR_TDE | EESR_ECI, 805 .fdr_value = 0x0000070f, 806 807 .no_psr = 1, 808 .apr = 1, 809 .mpr = 1, 810 .tpauser = 1, 811 .hw_swap = 1, 812 .rpadir = 1, 813 .rpadir_value = 2 << 16, 814 .no_trimd = 1, 815 .no_ade = 1, 816 .hw_crc = 1, 817 .tsu = 1, 818 .shift_rd0 = 1, 819 }; 820 821 static struct sh_eth_cpu_data sh7619_data = { 822 .register_type = SH_ETH_REG_FAST_SH3_SH2, 823 824 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 825 826 .apr = 1, 827 .mpr = 1, 828 .tpauser = 1, 829 .hw_swap = 1, 830 }; 831 832 static struct sh_eth_cpu_data sh771x_data = { 833 .register_type = SH_ETH_REG_FAST_SH3_SH2, 834 835 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 836 .tsu = 1, 837 }; 838 839 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) 840 { 841 if (!cd->ecsr_value) 842 cd->ecsr_value = DEFAULT_ECSR_INIT; 843 844 if (!cd->ecsipr_value) 845 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; 846 847 if (!cd->fcftr_value) 848 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | 849 DEFAULT_FIFO_F_D_RFD; 850 851 if (!cd->fdr_value) 852 cd->fdr_value = DEFAULT_FDR_INIT; 853 854 if (!cd->tx_check) 855 cd->tx_check = DEFAULT_TX_CHECK; 856 857 if (!cd->eesr_err_check) 858 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 859 } 860 861 static int sh_eth_check_reset(struct net_device *ndev) 862 { 863 int ret = 0; 864 int cnt = 100; 865 866 while (cnt > 0) { 867 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 868 break; 869 mdelay(1); 870 cnt--; 871 } 872 if (cnt <= 0) { 873 netdev_err(ndev, "Device reset failed\n"); 874 ret = -ETIMEDOUT; 875 } 876 return ret; 877 } 878 879 static int sh_eth_reset(struct net_device *ndev) 880 { 881 struct sh_eth_private *mdp = netdev_priv(ndev); 882 int ret = 0; 883 884 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { 885 sh_eth_write(ndev, EDSR_ENALL, EDSR); 886 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 887 EDMR); 888 889 ret = sh_eth_check_reset(ndev); 890 if (ret) 891 return ret; 892 893 /* Table Init */ 894 sh_eth_write(ndev, 0x0, TDLAR); 895 sh_eth_write(ndev, 0x0, TDFAR); 896 sh_eth_write(ndev, 0x0, TDFXR); 897 sh_eth_write(ndev, 0x0, TDFFR); 898 sh_eth_write(ndev, 0x0, RDLAR); 899 sh_eth_write(ndev, 0x0, RDFAR); 900 sh_eth_write(ndev, 0x0, RDFXR); 901 sh_eth_write(ndev, 0x0, RDFFR); 902 903 /* Reset HW CRC register */ 904 if (mdp->cd->hw_crc) 905 sh_eth_write(ndev, 0x0, CSMR); 906 907 /* Select MII mode */ 908 if (mdp->cd->select_mii) 909 sh_eth_select_mii(ndev); 910 } else { 911 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, 912 EDMR); 913 mdelay(3); 914 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 915 EDMR); 916 } 917 918 return ret; 919 } 920 921 static void sh_eth_set_receive_align(struct sk_buff *skb) 922 { 923 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); 924 925 if (reserve) 926 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); 927 } 928 929 930 /* CPU <-> EDMAC endian convert */ 931 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 932 { 933 switch (mdp->edmac_endian) { 934 case EDMAC_LITTLE_ENDIAN: 935 return cpu_to_le32(x); 936 case EDMAC_BIG_ENDIAN: 937 return cpu_to_be32(x); 938 } 939 return x; 940 } 941 942 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 943 { 944 switch (mdp->edmac_endian) { 945 case EDMAC_LITTLE_ENDIAN: 946 return le32_to_cpu(x); 947 case EDMAC_BIG_ENDIAN: 948 return be32_to_cpu(x); 949 } 950 return x; 951 } 952 953 /* Program the hardware MAC address from dev->dev_addr. */ 954 static void update_mac_address(struct net_device *ndev) 955 { 956 sh_eth_write(ndev, 957 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 958 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 959 sh_eth_write(ndev, 960 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 961 } 962 963 /* Get MAC address from SuperH MAC address register 964 * 965 * SuperH's Ethernet device doesn't have 'ROM' to MAC address. 966 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). 967 * When you want use this device, you must set MAC address in bootloader. 968 * 969 */ 970 static void read_mac_address(struct net_device *ndev, unsigned char *mac) 971 { 972 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 973 memcpy(ndev->dev_addr, mac, ETH_ALEN); 974 } else { 975 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 976 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 977 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; 978 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); 979 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; 980 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); 981 } 982 } 983 984 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 985 { 986 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) 987 return EDTRR_TRNS_GETHER; 988 else 989 return EDTRR_TRNS_ETHER; 990 } 991 992 struct bb_info { 993 void (*set_gate)(void *addr); 994 struct mdiobb_ctrl ctrl; 995 void *addr; 996 u32 mmd_msk;/* MMD */ 997 u32 mdo_msk; 998 u32 mdi_msk; 999 u32 mdc_msk; 1000 }; 1001 1002 /* PHY bit set */ 1003 static void bb_set(void *addr, u32 msk) 1004 { 1005 iowrite32(ioread32(addr) | msk, addr); 1006 } 1007 1008 /* PHY bit clear */ 1009 static void bb_clr(void *addr, u32 msk) 1010 { 1011 iowrite32((ioread32(addr) & ~msk), addr); 1012 } 1013 1014 /* PHY bit read */ 1015 static int bb_read(void *addr, u32 msk) 1016 { 1017 return (ioread32(addr) & msk) != 0; 1018 } 1019 1020 /* Data I/O pin control */ 1021 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 1022 { 1023 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 1024 1025 if (bitbang->set_gate) 1026 bitbang->set_gate(bitbang->addr); 1027 1028 if (bit) 1029 bb_set(bitbang->addr, bitbang->mmd_msk); 1030 else 1031 bb_clr(bitbang->addr, bitbang->mmd_msk); 1032 } 1033 1034 /* Set bit data*/ 1035 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) 1036 { 1037 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 1038 1039 if (bitbang->set_gate) 1040 bitbang->set_gate(bitbang->addr); 1041 1042 if (bit) 1043 bb_set(bitbang->addr, bitbang->mdo_msk); 1044 else 1045 bb_clr(bitbang->addr, bitbang->mdo_msk); 1046 } 1047 1048 /* Get bit data*/ 1049 static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 1050 { 1051 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 1052 1053 if (bitbang->set_gate) 1054 bitbang->set_gate(bitbang->addr); 1055 1056 return bb_read(bitbang->addr, bitbang->mdi_msk); 1057 } 1058 1059 /* MDC pin control */ 1060 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) 1061 { 1062 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 1063 1064 if (bitbang->set_gate) 1065 bitbang->set_gate(bitbang->addr); 1066 1067 if (bit) 1068 bb_set(bitbang->addr, bitbang->mdc_msk); 1069 else 1070 bb_clr(bitbang->addr, bitbang->mdc_msk); 1071 } 1072 1073 /* mdio bus control struct */ 1074 static struct mdiobb_ops bb_ops = { 1075 .owner = THIS_MODULE, 1076 .set_mdc = sh_mdc_ctrl, 1077 .set_mdio_dir = sh_mmd_ctrl, 1078 .set_mdio_data = sh_set_mdio, 1079 .get_mdio_data = sh_get_mdio, 1080 }; 1081 1082 /* free skb and descriptor buffer */ 1083 static void sh_eth_ring_free(struct net_device *ndev) 1084 { 1085 struct sh_eth_private *mdp = netdev_priv(ndev); 1086 int i; 1087 1088 /* Free Rx skb ringbuffer */ 1089 if (mdp->rx_skbuff) { 1090 for (i = 0; i < mdp->num_rx_ring; i++) 1091 dev_kfree_skb(mdp->rx_skbuff[i]); 1092 } 1093 kfree(mdp->rx_skbuff); 1094 mdp->rx_skbuff = NULL; 1095 1096 /* Free Tx skb ringbuffer */ 1097 if (mdp->tx_skbuff) { 1098 for (i = 0; i < mdp->num_tx_ring; i++) 1099 dev_kfree_skb(mdp->tx_skbuff[i]); 1100 } 1101 kfree(mdp->tx_skbuff); 1102 mdp->tx_skbuff = NULL; 1103 } 1104 1105 /* format skb and descriptor buffer */ 1106 static void sh_eth_ring_format(struct net_device *ndev) 1107 { 1108 struct sh_eth_private *mdp = netdev_priv(ndev); 1109 int i; 1110 struct sk_buff *skb; 1111 struct sh_eth_rxdesc *rxdesc = NULL; 1112 struct sh_eth_txdesc *txdesc = NULL; 1113 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1114 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1115 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1116 1117 mdp->cur_rx = 0; 1118 mdp->cur_tx = 0; 1119 mdp->dirty_rx = 0; 1120 mdp->dirty_tx = 0; 1121 1122 memset(mdp->rx_ring, 0, rx_ringsize); 1123 1124 /* build Rx ring buffer */ 1125 for (i = 0; i < mdp->num_rx_ring; i++) { 1126 /* skb */ 1127 mdp->rx_skbuff[i] = NULL; 1128 skb = netdev_alloc_skb(ndev, skbuff_size); 1129 mdp->rx_skbuff[i] = skb; 1130 if (skb == NULL) 1131 break; 1132 sh_eth_set_receive_align(skb); 1133 1134 /* RX descriptor */ 1135 rxdesc = &mdp->rx_ring[i]; 1136 /* The size of the buffer is a multiple of 16 bytes. */ 1137 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1138 dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, 1139 DMA_FROM_DEVICE); 1140 rxdesc->addr = virt_to_phys(skb->data); 1141 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1142 1143 /* Rx descriptor address set */ 1144 if (i == 0) { 1145 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 1146 if (sh_eth_is_gether(mdp) || 1147 sh_eth_is_rz_fast_ether(mdp)) 1148 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); 1149 } 1150 } 1151 1152 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); 1153 1154 /* Mark the last entry as wrapping the ring. */ 1155 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 1156 1157 memset(mdp->tx_ring, 0, tx_ringsize); 1158 1159 /* build Tx ring buffer */ 1160 for (i = 0; i < mdp->num_tx_ring; i++) { 1161 mdp->tx_skbuff[i] = NULL; 1162 txdesc = &mdp->tx_ring[i]; 1163 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1164 txdesc->buffer_length = 0; 1165 if (i == 0) { 1166 /* Tx descriptor address set */ 1167 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 1168 if (sh_eth_is_gether(mdp) || 1169 sh_eth_is_rz_fast_ether(mdp)) 1170 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); 1171 } 1172 } 1173 1174 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1175 } 1176 1177 /* Get skb and descriptor buffer */ 1178 static int sh_eth_ring_init(struct net_device *ndev) 1179 { 1180 struct sh_eth_private *mdp = netdev_priv(ndev); 1181 int rx_ringsize, tx_ringsize, ret = 0; 1182 1183 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 1184 * card needs room to do 8 byte alignment, +2 so we can reserve 1185 * the first 2 bytes, and +16 gets room for the status word from the 1186 * card. 1187 */ 1188 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 1189 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 1190 if (mdp->cd->rpadir) 1191 mdp->rx_buf_sz += NET_IP_ALIGN; 1192 1193 /* Allocate RX and TX skb rings */ 1194 mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring, 1195 sizeof(*mdp->rx_skbuff), GFP_KERNEL); 1196 if (!mdp->rx_skbuff) { 1197 ret = -ENOMEM; 1198 return ret; 1199 } 1200 1201 mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring, 1202 sizeof(*mdp->tx_skbuff), GFP_KERNEL); 1203 if (!mdp->tx_skbuff) { 1204 ret = -ENOMEM; 1205 goto skb_ring_free; 1206 } 1207 1208 /* Allocate all Rx descriptors. */ 1209 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1210 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1211 GFP_KERNEL); 1212 if (!mdp->rx_ring) { 1213 ret = -ENOMEM; 1214 goto desc_ring_free; 1215 } 1216 1217 mdp->dirty_rx = 0; 1218 1219 /* Allocate all Tx descriptors. */ 1220 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1221 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1222 GFP_KERNEL); 1223 if (!mdp->tx_ring) { 1224 ret = -ENOMEM; 1225 goto desc_ring_free; 1226 } 1227 return ret; 1228 1229 desc_ring_free: 1230 /* free DMA buffer */ 1231 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 1232 1233 skb_ring_free: 1234 /* Free Rx and Tx skb ring buffer */ 1235 sh_eth_ring_free(ndev); 1236 mdp->tx_ring = NULL; 1237 mdp->rx_ring = NULL; 1238 1239 return ret; 1240 } 1241 1242 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp) 1243 { 1244 int ringsize; 1245 1246 if (mdp->rx_ring) { 1247 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1248 dma_free_coherent(NULL, ringsize, mdp->rx_ring, 1249 mdp->rx_desc_dma); 1250 mdp->rx_ring = NULL; 1251 } 1252 1253 if (mdp->tx_ring) { 1254 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1255 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1256 mdp->tx_desc_dma); 1257 mdp->tx_ring = NULL; 1258 } 1259 } 1260 1261 static int sh_eth_dev_init(struct net_device *ndev, bool start) 1262 { 1263 int ret = 0; 1264 struct sh_eth_private *mdp = netdev_priv(ndev); 1265 u32 val; 1266 1267 /* Soft Reset */ 1268 ret = sh_eth_reset(ndev); 1269 if (ret) 1270 return ret; 1271 1272 if (mdp->cd->rmiimode) 1273 sh_eth_write(ndev, 0x1, RMIIMODE); 1274 1275 /* Descriptor format */ 1276 sh_eth_ring_format(ndev); 1277 if (mdp->cd->rpadir) 1278 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); 1279 1280 /* all sh_eth int mask */ 1281 sh_eth_write(ndev, 0, EESIPR); 1282 1283 #if defined(__LITTLE_ENDIAN) 1284 if (mdp->cd->hw_swap) 1285 sh_eth_write(ndev, EDMR_EL, EDMR); 1286 else 1287 #endif 1288 sh_eth_write(ndev, 0, EDMR); 1289 1290 /* FIFO size set */ 1291 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 1292 sh_eth_write(ndev, 0, TFTR); 1293 1294 /* Frame recv control (enable multiple-packets per rx irq) */ 1295 sh_eth_write(ndev, RMCR_RNC, RMCR); 1296 1297 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1298 1299 if (mdp->cd->bculr) 1300 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 1301 1302 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); 1303 1304 if (!mdp->cd->no_trimd) 1305 sh_eth_write(ndev, 0, TRIMD); 1306 1307 /* Recv frame limit set register */ 1308 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 1309 RFLR); 1310 1311 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 1312 if (start) 1313 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1314 1315 /* PAUSE Prohibition */ 1316 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 1317 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 1318 1319 sh_eth_write(ndev, val, ECMR); 1320 1321 if (mdp->cd->set_rate) 1322 mdp->cd->set_rate(ndev); 1323 1324 /* E-MAC Status Register clear */ 1325 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 1326 1327 /* E-MAC Interrupt Enable register */ 1328 if (start) 1329 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 1330 1331 /* Set MAC address */ 1332 update_mac_address(ndev); 1333 1334 /* mask reset */ 1335 if (mdp->cd->apr) 1336 sh_eth_write(ndev, APR_AP, APR); 1337 if (mdp->cd->mpr) 1338 sh_eth_write(ndev, MPR_MP, MPR); 1339 if (mdp->cd->tpauser) 1340 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 1341 1342 if (start) { 1343 /* Setting the Rx mode will start the Rx process. */ 1344 sh_eth_write(ndev, EDRRR_R, EDRRR); 1345 1346 netif_start_queue(ndev); 1347 } 1348 1349 return ret; 1350 } 1351 1352 /* free Tx skb function */ 1353 static int sh_eth_txfree(struct net_device *ndev) 1354 { 1355 struct sh_eth_private *mdp = netdev_priv(ndev); 1356 struct sh_eth_txdesc *txdesc; 1357 int free_num = 0; 1358 int entry = 0; 1359 1360 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1361 entry = mdp->dirty_tx % mdp->num_tx_ring; 1362 txdesc = &mdp->tx_ring[entry]; 1363 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1364 break; 1365 /* Free the original skb. */ 1366 if (mdp->tx_skbuff[entry]) { 1367 dma_unmap_single(&ndev->dev, txdesc->addr, 1368 txdesc->buffer_length, DMA_TO_DEVICE); 1369 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1370 mdp->tx_skbuff[entry] = NULL; 1371 free_num++; 1372 } 1373 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1374 if (entry >= mdp->num_tx_ring - 1) 1375 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1376 1377 ndev->stats.tx_packets++; 1378 ndev->stats.tx_bytes += txdesc->buffer_length; 1379 } 1380 return free_num; 1381 } 1382 1383 /* Packet receive function */ 1384 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) 1385 { 1386 struct sh_eth_private *mdp = netdev_priv(ndev); 1387 struct sh_eth_rxdesc *rxdesc; 1388 1389 int entry = mdp->cur_rx % mdp->num_rx_ring; 1390 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1391 int limit; 1392 struct sk_buff *skb; 1393 u16 pkt_len = 0; 1394 u32 desc_status; 1395 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1396 1397 boguscnt = min(boguscnt, *quota); 1398 limit = boguscnt; 1399 rxdesc = &mdp->rx_ring[entry]; 1400 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1401 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1402 pkt_len = rxdesc->frame_length; 1403 1404 if (--boguscnt < 0) 1405 break; 1406 1407 if (!(desc_status & RDFEND)) 1408 ndev->stats.rx_length_errors++; 1409 1410 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1411 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1412 * bit 0. However, in case of the R8A7740, R8A779x, and 1413 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1414 * driver needs right shifting by 16. 1415 */ 1416 if (mdp->cd->shift_rd0) 1417 desc_status >>= 16; 1418 1419 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1420 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1421 ndev->stats.rx_errors++; 1422 if (desc_status & RD_RFS1) 1423 ndev->stats.rx_crc_errors++; 1424 if (desc_status & RD_RFS2) 1425 ndev->stats.rx_frame_errors++; 1426 if (desc_status & RD_RFS3) 1427 ndev->stats.rx_length_errors++; 1428 if (desc_status & RD_RFS4) 1429 ndev->stats.rx_length_errors++; 1430 if (desc_status & RD_RFS6) 1431 ndev->stats.rx_missed_errors++; 1432 if (desc_status & RD_RFS10) 1433 ndev->stats.rx_over_errors++; 1434 } else { 1435 if (!mdp->cd->hw_swap) 1436 sh_eth_soft_swap( 1437 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1438 pkt_len + 2); 1439 skb = mdp->rx_skbuff[entry]; 1440 mdp->rx_skbuff[entry] = NULL; 1441 if (mdp->cd->rpadir) 1442 skb_reserve(skb, NET_IP_ALIGN); 1443 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, 1444 ALIGN(mdp->rx_buf_sz, 16), 1445 DMA_FROM_DEVICE); 1446 skb_put(skb, pkt_len); 1447 skb->protocol = eth_type_trans(skb, ndev); 1448 netif_receive_skb(skb); 1449 ndev->stats.rx_packets++; 1450 ndev->stats.rx_bytes += pkt_len; 1451 } 1452 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1453 rxdesc = &mdp->rx_ring[entry]; 1454 } 1455 1456 /* Refill the Rx ring buffers. */ 1457 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1458 entry = mdp->dirty_rx % mdp->num_rx_ring; 1459 rxdesc = &mdp->rx_ring[entry]; 1460 /* The size of the buffer is 16 byte boundary. */ 1461 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1462 1463 if (mdp->rx_skbuff[entry] == NULL) { 1464 skb = netdev_alloc_skb(ndev, skbuff_size); 1465 mdp->rx_skbuff[entry] = skb; 1466 if (skb == NULL) 1467 break; /* Better luck next round. */ 1468 sh_eth_set_receive_align(skb); 1469 dma_map_single(&ndev->dev, skb->data, 1470 rxdesc->buffer_length, DMA_FROM_DEVICE); 1471 1472 skb_checksum_none_assert(skb); 1473 rxdesc->addr = virt_to_phys(skb->data); 1474 } 1475 if (entry >= mdp->num_rx_ring - 1) 1476 rxdesc->status |= 1477 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1478 else 1479 rxdesc->status |= 1480 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1481 } 1482 1483 /* Restart Rx engine if stopped. */ 1484 /* If we don't need to check status, don't. -KDU */ 1485 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1486 /* fix the values for the next receiving if RDE is set */ 1487 if (intr_status & EESR_RDE) { 1488 u32 count = (sh_eth_read(ndev, RDFAR) - 1489 sh_eth_read(ndev, RDLAR)) >> 4; 1490 1491 mdp->cur_rx = count; 1492 mdp->dirty_rx = count; 1493 } 1494 sh_eth_write(ndev, EDRRR_R, EDRRR); 1495 } 1496 1497 *quota -= limit - boguscnt - 1; 1498 1499 return *quota <= 0; 1500 } 1501 1502 static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1503 { 1504 /* disable tx and rx */ 1505 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1506 ~(ECMR_RE | ECMR_TE), ECMR); 1507 } 1508 1509 static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1510 { 1511 /* enable tx and rx */ 1512 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1513 (ECMR_RE | ECMR_TE), ECMR); 1514 } 1515 1516 /* error control function */ 1517 static void sh_eth_error(struct net_device *ndev, int intr_status) 1518 { 1519 struct sh_eth_private *mdp = netdev_priv(ndev); 1520 u32 felic_stat; 1521 u32 link_stat; 1522 u32 mask; 1523 1524 if (intr_status & EESR_ECI) { 1525 felic_stat = sh_eth_read(ndev, ECSR); 1526 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1527 if (felic_stat & ECSR_ICD) 1528 ndev->stats.tx_carrier_errors++; 1529 if (felic_stat & ECSR_LCHNG) { 1530 /* Link Changed */ 1531 if (mdp->cd->no_psr || mdp->no_ether_link) { 1532 goto ignore_link; 1533 } else { 1534 link_stat = (sh_eth_read(ndev, PSR)); 1535 if (mdp->ether_link_active_low) 1536 link_stat = ~link_stat; 1537 } 1538 if (!(link_stat & PHY_ST_LINK)) { 1539 sh_eth_rcv_snd_disable(ndev); 1540 } else { 1541 /* Link Up */ 1542 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1543 ~DMAC_M_ECI, EESIPR); 1544 /* clear int */ 1545 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1546 ECSR); 1547 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1548 DMAC_M_ECI, EESIPR); 1549 /* enable tx and rx */ 1550 sh_eth_rcv_snd_enable(ndev); 1551 } 1552 } 1553 } 1554 1555 ignore_link: 1556 if (intr_status & EESR_TWB) { 1557 /* Unused write back interrupt */ 1558 if (intr_status & EESR_TABT) { /* Transmit Abort int */ 1559 ndev->stats.tx_aborted_errors++; 1560 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); 1561 } 1562 } 1563 1564 if (intr_status & EESR_RABT) { 1565 /* Receive Abort int */ 1566 if (intr_status & EESR_RFRMER) { 1567 /* Receive Frame Overflow int */ 1568 ndev->stats.rx_frame_errors++; 1569 netif_err(mdp, rx_err, ndev, "Receive Abort\n"); 1570 } 1571 } 1572 1573 if (intr_status & EESR_TDE) { 1574 /* Transmit Descriptor Empty int */ 1575 ndev->stats.tx_fifo_errors++; 1576 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); 1577 } 1578 1579 if (intr_status & EESR_TFE) { 1580 /* FIFO under flow */ 1581 ndev->stats.tx_fifo_errors++; 1582 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); 1583 } 1584 1585 if (intr_status & EESR_RDE) { 1586 /* Receive Descriptor Empty int */ 1587 ndev->stats.rx_over_errors++; 1588 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); 1589 } 1590 1591 if (intr_status & EESR_RFE) { 1592 /* Receive FIFO Overflow int */ 1593 ndev->stats.rx_fifo_errors++; 1594 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); 1595 } 1596 1597 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1598 /* Address Error */ 1599 ndev->stats.tx_fifo_errors++; 1600 netif_err(mdp, tx_err, ndev, "Address Error\n"); 1601 } 1602 1603 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1604 if (mdp->cd->no_ade) 1605 mask &= ~EESR_ADE; 1606 if (intr_status & mask) { 1607 /* Tx error */ 1608 u32 edtrr = sh_eth_read(ndev, EDTRR); 1609 1610 /* dmesg */ 1611 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1612 intr_status, mdp->cur_tx, mdp->dirty_tx, 1613 (u32)ndev->state, edtrr); 1614 /* dirty buffer free */ 1615 sh_eth_txfree(ndev); 1616 1617 /* SH7712 BUG */ 1618 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1619 /* tx dma start */ 1620 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1621 } 1622 /* wakeup */ 1623 netif_wake_queue(ndev); 1624 } 1625 } 1626 1627 static irqreturn_t sh_eth_interrupt(int irq, void *netdev) 1628 { 1629 struct net_device *ndev = netdev; 1630 struct sh_eth_private *mdp = netdev_priv(ndev); 1631 struct sh_eth_cpu_data *cd = mdp->cd; 1632 irqreturn_t ret = IRQ_NONE; 1633 unsigned long intr_status, intr_enable; 1634 1635 spin_lock(&mdp->lock); 1636 1637 /* Get interrupt status */ 1638 intr_status = sh_eth_read(ndev, EESR); 1639 /* Mask it with the interrupt mask, forcing ECI interrupt to be always 1640 * enabled since it's the one that comes thru regardless of the mask, 1641 * and we need to fully handle it in sh_eth_error() in order to quench 1642 * it as it doesn't get cleared by just writing 1 to the ECI bit... 1643 */ 1644 intr_enable = sh_eth_read(ndev, EESIPR); 1645 intr_status &= intr_enable | DMAC_M_ECI; 1646 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) 1647 ret = IRQ_HANDLED; 1648 else 1649 goto other_irq; 1650 1651 if (intr_status & EESR_RX_CHECK) { 1652 if (napi_schedule_prep(&mdp->napi)) { 1653 /* Mask Rx interrupts */ 1654 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, 1655 EESIPR); 1656 __napi_schedule(&mdp->napi); 1657 } else { 1658 netdev_warn(ndev, 1659 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", 1660 intr_status, intr_enable); 1661 } 1662 } 1663 1664 /* Tx Check */ 1665 if (intr_status & cd->tx_check) { 1666 /* Clear Tx interrupts */ 1667 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); 1668 1669 sh_eth_txfree(ndev); 1670 netif_wake_queue(ndev); 1671 } 1672 1673 if (intr_status & cd->eesr_err_check) { 1674 /* Clear error interrupts */ 1675 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); 1676 1677 sh_eth_error(ndev, intr_status); 1678 } 1679 1680 other_irq: 1681 spin_unlock(&mdp->lock); 1682 1683 return ret; 1684 } 1685 1686 static int sh_eth_poll(struct napi_struct *napi, int budget) 1687 { 1688 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, 1689 napi); 1690 struct net_device *ndev = napi->dev; 1691 int quota = budget; 1692 unsigned long intr_status; 1693 1694 for (;;) { 1695 intr_status = sh_eth_read(ndev, EESR); 1696 if (!(intr_status & EESR_RX_CHECK)) 1697 break; 1698 /* Clear Rx interrupts */ 1699 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); 1700 1701 if (sh_eth_rx(ndev, intr_status, "a)) 1702 goto out; 1703 } 1704 1705 napi_complete(napi); 1706 1707 /* Reenable Rx interrupts */ 1708 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1709 out: 1710 return budget - quota; 1711 } 1712 1713 /* PHY state control function */ 1714 static void sh_eth_adjust_link(struct net_device *ndev) 1715 { 1716 struct sh_eth_private *mdp = netdev_priv(ndev); 1717 struct phy_device *phydev = mdp->phydev; 1718 int new_state = 0; 1719 1720 if (phydev->link) { 1721 if (phydev->duplex != mdp->duplex) { 1722 new_state = 1; 1723 mdp->duplex = phydev->duplex; 1724 if (mdp->cd->set_duplex) 1725 mdp->cd->set_duplex(ndev); 1726 } 1727 1728 if (phydev->speed != mdp->speed) { 1729 new_state = 1; 1730 mdp->speed = phydev->speed; 1731 if (mdp->cd->set_rate) 1732 mdp->cd->set_rate(ndev); 1733 } 1734 if (!mdp->link) { 1735 sh_eth_write(ndev, 1736 sh_eth_read(ndev, ECMR) & ~ECMR_TXF, 1737 ECMR); 1738 new_state = 1; 1739 mdp->link = phydev->link; 1740 if (mdp->cd->no_psr || mdp->no_ether_link) 1741 sh_eth_rcv_snd_enable(ndev); 1742 } 1743 } else if (mdp->link) { 1744 new_state = 1; 1745 mdp->link = 0; 1746 mdp->speed = 0; 1747 mdp->duplex = -1; 1748 if (mdp->cd->no_psr || mdp->no_ether_link) 1749 sh_eth_rcv_snd_disable(ndev); 1750 } 1751 1752 if (new_state && netif_msg_link(mdp)) 1753 phy_print_status(phydev); 1754 } 1755 1756 /* PHY init function */ 1757 static int sh_eth_phy_init(struct net_device *ndev) 1758 { 1759 struct device_node *np = ndev->dev.parent->of_node; 1760 struct sh_eth_private *mdp = netdev_priv(ndev); 1761 struct phy_device *phydev = NULL; 1762 1763 mdp->link = 0; 1764 mdp->speed = 0; 1765 mdp->duplex = -1; 1766 1767 /* Try connect to PHY */ 1768 if (np) { 1769 struct device_node *pn; 1770 1771 pn = of_parse_phandle(np, "phy-handle", 0); 1772 phydev = of_phy_connect(ndev, pn, 1773 sh_eth_adjust_link, 0, 1774 mdp->phy_interface); 1775 1776 if (!phydev) 1777 phydev = ERR_PTR(-ENOENT); 1778 } else { 1779 char phy_id[MII_BUS_ID_SIZE + 3]; 1780 1781 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1782 mdp->mii_bus->id, mdp->phy_id); 1783 1784 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1785 mdp->phy_interface); 1786 } 1787 1788 if (IS_ERR(phydev)) { 1789 netdev_err(ndev, "failed to connect PHY\n"); 1790 return PTR_ERR(phydev); 1791 } 1792 1793 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", 1794 phydev->addr, phydev->irq, phydev->drv->name); 1795 1796 mdp->phydev = phydev; 1797 1798 return 0; 1799 } 1800 1801 /* PHY control start function */ 1802 static int sh_eth_phy_start(struct net_device *ndev) 1803 { 1804 struct sh_eth_private *mdp = netdev_priv(ndev); 1805 int ret; 1806 1807 ret = sh_eth_phy_init(ndev); 1808 if (ret) 1809 return ret; 1810 1811 phy_start(mdp->phydev); 1812 1813 return 0; 1814 } 1815 1816 static int sh_eth_get_settings(struct net_device *ndev, 1817 struct ethtool_cmd *ecmd) 1818 { 1819 struct sh_eth_private *mdp = netdev_priv(ndev); 1820 unsigned long flags; 1821 int ret; 1822 1823 spin_lock_irqsave(&mdp->lock, flags); 1824 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1825 spin_unlock_irqrestore(&mdp->lock, flags); 1826 1827 return ret; 1828 } 1829 1830 static int sh_eth_set_settings(struct net_device *ndev, 1831 struct ethtool_cmd *ecmd) 1832 { 1833 struct sh_eth_private *mdp = netdev_priv(ndev); 1834 unsigned long flags; 1835 int ret; 1836 1837 spin_lock_irqsave(&mdp->lock, flags); 1838 1839 /* disable tx and rx */ 1840 sh_eth_rcv_snd_disable(ndev); 1841 1842 ret = phy_ethtool_sset(mdp->phydev, ecmd); 1843 if (ret) 1844 goto error_exit; 1845 1846 if (ecmd->duplex == DUPLEX_FULL) 1847 mdp->duplex = 1; 1848 else 1849 mdp->duplex = 0; 1850 1851 if (mdp->cd->set_duplex) 1852 mdp->cd->set_duplex(ndev); 1853 1854 error_exit: 1855 mdelay(1); 1856 1857 /* enable tx and rx */ 1858 sh_eth_rcv_snd_enable(ndev); 1859 1860 spin_unlock_irqrestore(&mdp->lock, flags); 1861 1862 return ret; 1863 } 1864 1865 static int sh_eth_nway_reset(struct net_device *ndev) 1866 { 1867 struct sh_eth_private *mdp = netdev_priv(ndev); 1868 unsigned long flags; 1869 int ret; 1870 1871 spin_lock_irqsave(&mdp->lock, flags); 1872 ret = phy_start_aneg(mdp->phydev); 1873 spin_unlock_irqrestore(&mdp->lock, flags); 1874 1875 return ret; 1876 } 1877 1878 static u32 sh_eth_get_msglevel(struct net_device *ndev) 1879 { 1880 struct sh_eth_private *mdp = netdev_priv(ndev); 1881 return mdp->msg_enable; 1882 } 1883 1884 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1885 { 1886 struct sh_eth_private *mdp = netdev_priv(ndev); 1887 mdp->msg_enable = value; 1888 } 1889 1890 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1891 "rx_current", "tx_current", 1892 "rx_dirty", "tx_dirty", 1893 }; 1894 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1895 1896 static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1897 { 1898 switch (sset) { 1899 case ETH_SS_STATS: 1900 return SH_ETH_STATS_LEN; 1901 default: 1902 return -EOPNOTSUPP; 1903 } 1904 } 1905 1906 static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1907 struct ethtool_stats *stats, u64 *data) 1908 { 1909 struct sh_eth_private *mdp = netdev_priv(ndev); 1910 int i = 0; 1911 1912 /* device-specific stats */ 1913 data[i++] = mdp->cur_rx; 1914 data[i++] = mdp->cur_tx; 1915 data[i++] = mdp->dirty_rx; 1916 data[i++] = mdp->dirty_tx; 1917 } 1918 1919 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1920 { 1921 switch (stringset) { 1922 case ETH_SS_STATS: 1923 memcpy(data, *sh_eth_gstrings_stats, 1924 sizeof(sh_eth_gstrings_stats)); 1925 break; 1926 } 1927 } 1928 1929 static void sh_eth_get_ringparam(struct net_device *ndev, 1930 struct ethtool_ringparam *ring) 1931 { 1932 struct sh_eth_private *mdp = netdev_priv(ndev); 1933 1934 ring->rx_max_pending = RX_RING_MAX; 1935 ring->tx_max_pending = TX_RING_MAX; 1936 ring->rx_pending = mdp->num_rx_ring; 1937 ring->tx_pending = mdp->num_tx_ring; 1938 } 1939 1940 static int sh_eth_set_ringparam(struct net_device *ndev, 1941 struct ethtool_ringparam *ring) 1942 { 1943 struct sh_eth_private *mdp = netdev_priv(ndev); 1944 int ret; 1945 1946 if (ring->tx_pending > TX_RING_MAX || 1947 ring->rx_pending > RX_RING_MAX || 1948 ring->tx_pending < TX_RING_MIN || 1949 ring->rx_pending < RX_RING_MIN) 1950 return -EINVAL; 1951 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 1952 return -EINVAL; 1953 1954 if (netif_running(ndev)) { 1955 netif_tx_disable(ndev); 1956 /* Disable interrupts by clearing the interrupt mask. */ 1957 sh_eth_write(ndev, 0x0000, EESIPR); 1958 /* Stop the chip's Tx and Rx processes. */ 1959 sh_eth_write(ndev, 0, EDTRR); 1960 sh_eth_write(ndev, 0, EDRRR); 1961 synchronize_irq(ndev->irq); 1962 } 1963 1964 /* Free all the skbuffs in the Rx queue. */ 1965 sh_eth_ring_free(ndev); 1966 /* Free DMA buffer */ 1967 sh_eth_free_dma_buffer(mdp); 1968 1969 /* Set new parameters */ 1970 mdp->num_rx_ring = ring->rx_pending; 1971 mdp->num_tx_ring = ring->tx_pending; 1972 1973 ret = sh_eth_ring_init(ndev); 1974 if (ret < 0) { 1975 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); 1976 return ret; 1977 } 1978 ret = sh_eth_dev_init(ndev, false); 1979 if (ret < 0) { 1980 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); 1981 return ret; 1982 } 1983 1984 if (netif_running(ndev)) { 1985 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1986 /* Setting the Rx mode will start the Rx process. */ 1987 sh_eth_write(ndev, EDRRR_R, EDRRR); 1988 netif_wake_queue(ndev); 1989 } 1990 1991 return 0; 1992 } 1993 1994 static const struct ethtool_ops sh_eth_ethtool_ops = { 1995 .get_settings = sh_eth_get_settings, 1996 .set_settings = sh_eth_set_settings, 1997 .nway_reset = sh_eth_nway_reset, 1998 .get_msglevel = sh_eth_get_msglevel, 1999 .set_msglevel = sh_eth_set_msglevel, 2000 .get_link = ethtool_op_get_link, 2001 .get_strings = sh_eth_get_strings, 2002 .get_ethtool_stats = sh_eth_get_ethtool_stats, 2003 .get_sset_count = sh_eth_get_sset_count, 2004 .get_ringparam = sh_eth_get_ringparam, 2005 .set_ringparam = sh_eth_set_ringparam, 2006 }; 2007 2008 /* network device open function */ 2009 static int sh_eth_open(struct net_device *ndev) 2010 { 2011 int ret = 0; 2012 struct sh_eth_private *mdp = netdev_priv(ndev); 2013 2014 pm_runtime_get_sync(&mdp->pdev->dev); 2015 2016 napi_enable(&mdp->napi); 2017 2018 ret = request_irq(ndev->irq, sh_eth_interrupt, 2019 mdp->cd->irq_flags, ndev->name, ndev); 2020 if (ret) { 2021 netdev_err(ndev, "Can not assign IRQ number\n"); 2022 goto out_napi_off; 2023 } 2024 2025 /* Descriptor set */ 2026 ret = sh_eth_ring_init(ndev); 2027 if (ret) 2028 goto out_free_irq; 2029 2030 /* device init */ 2031 ret = sh_eth_dev_init(ndev, true); 2032 if (ret) 2033 goto out_free_irq; 2034 2035 /* PHY control start*/ 2036 ret = sh_eth_phy_start(ndev); 2037 if (ret) 2038 goto out_free_irq; 2039 2040 mdp->is_opened = 1; 2041 2042 return ret; 2043 2044 out_free_irq: 2045 free_irq(ndev->irq, ndev); 2046 out_napi_off: 2047 napi_disable(&mdp->napi); 2048 pm_runtime_put_sync(&mdp->pdev->dev); 2049 return ret; 2050 } 2051 2052 /* Timeout function */ 2053 static void sh_eth_tx_timeout(struct net_device *ndev) 2054 { 2055 struct sh_eth_private *mdp = netdev_priv(ndev); 2056 struct sh_eth_rxdesc *rxdesc; 2057 int i; 2058 2059 netif_stop_queue(ndev); 2060 2061 netif_err(mdp, timer, ndev, 2062 "transmit timed out, status %8.8x, resetting...\n", 2063 (int)sh_eth_read(ndev, EESR)); 2064 2065 /* tx_errors count up */ 2066 ndev->stats.tx_errors++; 2067 2068 /* Free all the skbuffs in the Rx queue. */ 2069 for (i = 0; i < mdp->num_rx_ring; i++) { 2070 rxdesc = &mdp->rx_ring[i]; 2071 rxdesc->status = 0; 2072 rxdesc->addr = 0xBADF00D0; 2073 dev_kfree_skb(mdp->rx_skbuff[i]); 2074 mdp->rx_skbuff[i] = NULL; 2075 } 2076 for (i = 0; i < mdp->num_tx_ring; i++) { 2077 dev_kfree_skb(mdp->tx_skbuff[i]); 2078 mdp->tx_skbuff[i] = NULL; 2079 } 2080 2081 /* device init */ 2082 sh_eth_dev_init(ndev, true); 2083 } 2084 2085 /* Packet transmit function */ 2086 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) 2087 { 2088 struct sh_eth_private *mdp = netdev_priv(ndev); 2089 struct sh_eth_txdesc *txdesc; 2090 u32 entry; 2091 unsigned long flags; 2092 2093 spin_lock_irqsave(&mdp->lock, flags); 2094 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2095 if (!sh_eth_txfree(ndev)) { 2096 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); 2097 netif_stop_queue(ndev); 2098 spin_unlock_irqrestore(&mdp->lock, flags); 2099 return NETDEV_TX_BUSY; 2100 } 2101 } 2102 spin_unlock_irqrestore(&mdp->lock, flags); 2103 2104 entry = mdp->cur_tx % mdp->num_tx_ring; 2105 mdp->tx_skbuff[entry] = skb; 2106 txdesc = &mdp->tx_ring[entry]; 2107 /* soft swap. */ 2108 if (!mdp->cd->hw_swap) 2109 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2110 skb->len + 2); 2111 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2112 DMA_TO_DEVICE); 2113 if (skb->len < ETH_ZLEN) 2114 txdesc->buffer_length = ETH_ZLEN; 2115 else 2116 txdesc->buffer_length = skb->len; 2117 2118 if (entry >= mdp->num_tx_ring - 1) 2119 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2120 else 2121 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 2122 2123 mdp->cur_tx++; 2124 2125 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) 2126 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 2127 2128 return NETDEV_TX_OK; 2129 } 2130 2131 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 2132 { 2133 struct sh_eth_private *mdp = netdev_priv(ndev); 2134 2135 if (sh_eth_is_rz_fast_ether(mdp)) 2136 return &ndev->stats; 2137 2138 if (!mdp->is_opened) 2139 return &ndev->stats; 2140 2141 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); 2142 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 2143 ndev->stats.collisions += sh_eth_read(ndev, CDCR); 2144 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 2145 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 2146 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 2147 2148 if (sh_eth_is_gether(mdp)) { 2149 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 2150 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 2151 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 2152 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 2153 } else { 2154 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 2155 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 2156 } 2157 2158 return &ndev->stats; 2159 } 2160 2161 /* device close function */ 2162 static int sh_eth_close(struct net_device *ndev) 2163 { 2164 struct sh_eth_private *mdp = netdev_priv(ndev); 2165 2166 netif_stop_queue(ndev); 2167 2168 /* Disable interrupts by clearing the interrupt mask. */ 2169 sh_eth_write(ndev, 0x0000, EESIPR); 2170 2171 /* Stop the chip's Tx and Rx processes. */ 2172 sh_eth_write(ndev, 0, EDTRR); 2173 sh_eth_write(ndev, 0, EDRRR); 2174 2175 sh_eth_get_stats(ndev); 2176 /* PHY Disconnect */ 2177 if (mdp->phydev) { 2178 phy_stop(mdp->phydev); 2179 phy_disconnect(mdp->phydev); 2180 } 2181 2182 free_irq(ndev->irq, ndev); 2183 2184 napi_disable(&mdp->napi); 2185 2186 /* Free all the skbuffs in the Rx queue. */ 2187 sh_eth_ring_free(ndev); 2188 2189 /* free DMA buffer */ 2190 sh_eth_free_dma_buffer(mdp); 2191 2192 pm_runtime_put_sync(&mdp->pdev->dev); 2193 2194 mdp->is_opened = 0; 2195 2196 return 0; 2197 } 2198 2199 /* ioctl to device function */ 2200 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2201 { 2202 struct sh_eth_private *mdp = netdev_priv(ndev); 2203 struct phy_device *phydev = mdp->phydev; 2204 2205 if (!netif_running(ndev)) 2206 return -EINVAL; 2207 2208 if (!phydev) 2209 return -ENODEV; 2210 2211 return phy_mii_ioctl(phydev, rq, cmd); 2212 } 2213 2214 /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ 2215 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 2216 int entry) 2217 { 2218 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); 2219 } 2220 2221 static u32 sh_eth_tsu_get_post_mask(int entry) 2222 { 2223 return 0x0f << (28 - ((entry % 8) * 4)); 2224 } 2225 2226 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) 2227 { 2228 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); 2229 } 2230 2231 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, 2232 int entry) 2233 { 2234 struct sh_eth_private *mdp = netdev_priv(ndev); 2235 u32 tmp; 2236 void *reg_offset; 2237 2238 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 2239 tmp = ioread32(reg_offset); 2240 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); 2241 } 2242 2243 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 2244 int entry) 2245 { 2246 struct sh_eth_private *mdp = netdev_priv(ndev); 2247 u32 post_mask, ref_mask, tmp; 2248 void *reg_offset; 2249 2250 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 2251 post_mask = sh_eth_tsu_get_post_mask(entry); 2252 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 2253 2254 tmp = ioread32(reg_offset); 2255 iowrite32(tmp & ~post_mask, reg_offset); 2256 2257 /* If other port enables, the function returns "true" */ 2258 return tmp & ref_mask; 2259 } 2260 2261 static int sh_eth_tsu_busy(struct net_device *ndev) 2262 { 2263 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; 2264 struct sh_eth_private *mdp = netdev_priv(ndev); 2265 2266 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { 2267 udelay(10); 2268 timeout--; 2269 if (timeout <= 0) { 2270 netdev_err(ndev, "%s: timeout\n", __func__); 2271 return -ETIMEDOUT; 2272 } 2273 } 2274 2275 return 0; 2276 } 2277 2278 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, 2279 const u8 *addr) 2280 { 2281 u32 val; 2282 2283 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; 2284 iowrite32(val, reg); 2285 if (sh_eth_tsu_busy(ndev) < 0) 2286 return -EBUSY; 2287 2288 val = addr[4] << 8 | addr[5]; 2289 iowrite32(val, reg + 4); 2290 if (sh_eth_tsu_busy(ndev) < 0) 2291 return -EBUSY; 2292 2293 return 0; 2294 } 2295 2296 static void sh_eth_tsu_read_entry(void *reg, u8 *addr) 2297 { 2298 u32 val; 2299 2300 val = ioread32(reg); 2301 addr[0] = (val >> 24) & 0xff; 2302 addr[1] = (val >> 16) & 0xff; 2303 addr[2] = (val >> 8) & 0xff; 2304 addr[3] = val & 0xff; 2305 val = ioread32(reg + 4); 2306 addr[4] = (val >> 8) & 0xff; 2307 addr[5] = val & 0xff; 2308 } 2309 2310 2311 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) 2312 { 2313 struct sh_eth_private *mdp = netdev_priv(ndev); 2314 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2315 int i; 2316 u8 c_addr[ETH_ALEN]; 2317 2318 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 2319 sh_eth_tsu_read_entry(reg_offset, c_addr); 2320 if (ether_addr_equal(addr, c_addr)) 2321 return i; 2322 } 2323 2324 return -ENOENT; 2325 } 2326 2327 static int sh_eth_tsu_find_empty(struct net_device *ndev) 2328 { 2329 u8 blank[ETH_ALEN]; 2330 int entry; 2331 2332 memset(blank, 0, sizeof(blank)); 2333 entry = sh_eth_tsu_find_entry(ndev, blank); 2334 return (entry < 0) ? -ENOMEM : entry; 2335 } 2336 2337 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, 2338 int entry) 2339 { 2340 struct sh_eth_private *mdp = netdev_priv(ndev); 2341 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2342 int ret; 2343 u8 blank[ETH_ALEN]; 2344 2345 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & 2346 ~(1 << (31 - entry)), TSU_TEN); 2347 2348 memset(blank, 0, sizeof(blank)); 2349 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); 2350 if (ret < 0) 2351 return ret; 2352 return 0; 2353 } 2354 2355 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) 2356 { 2357 struct sh_eth_private *mdp = netdev_priv(ndev); 2358 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2359 int i, ret; 2360 2361 if (!mdp->cd->tsu) 2362 return 0; 2363 2364 i = sh_eth_tsu_find_entry(ndev, addr); 2365 if (i < 0) { 2366 /* No entry found, create one */ 2367 i = sh_eth_tsu_find_empty(ndev); 2368 if (i < 0) 2369 return -ENOMEM; 2370 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); 2371 if (ret < 0) 2372 return ret; 2373 2374 /* Enable the entry */ 2375 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | 2376 (1 << (31 - i)), TSU_TEN); 2377 } 2378 2379 /* Entry found or created, enable POST */ 2380 sh_eth_tsu_enable_cam_entry_post(ndev, i); 2381 2382 return 0; 2383 } 2384 2385 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) 2386 { 2387 struct sh_eth_private *mdp = netdev_priv(ndev); 2388 int i, ret; 2389 2390 if (!mdp->cd->tsu) 2391 return 0; 2392 2393 i = sh_eth_tsu_find_entry(ndev, addr); 2394 if (i) { 2395 /* Entry found */ 2396 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 2397 goto done; 2398 2399 /* Disable the entry if both ports was disabled */ 2400 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 2401 if (ret < 0) 2402 return ret; 2403 } 2404 done: 2405 return 0; 2406 } 2407 2408 static int sh_eth_tsu_purge_all(struct net_device *ndev) 2409 { 2410 struct sh_eth_private *mdp = netdev_priv(ndev); 2411 int i, ret; 2412 2413 if (unlikely(!mdp->cd->tsu)) 2414 return 0; 2415 2416 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { 2417 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 2418 continue; 2419 2420 /* Disable the entry if both ports was disabled */ 2421 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 2422 if (ret < 0) 2423 return ret; 2424 } 2425 2426 return 0; 2427 } 2428 2429 static void sh_eth_tsu_purge_mcast(struct net_device *ndev) 2430 { 2431 struct sh_eth_private *mdp = netdev_priv(ndev); 2432 u8 addr[ETH_ALEN]; 2433 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2434 int i; 2435 2436 if (unlikely(!mdp->cd->tsu)) 2437 return; 2438 2439 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 2440 sh_eth_tsu_read_entry(reg_offset, addr); 2441 if (is_multicast_ether_addr(addr)) 2442 sh_eth_tsu_del_entry(ndev, addr); 2443 } 2444 } 2445 2446 /* Multicast reception directions set */ 2447 static void sh_eth_set_multicast_list(struct net_device *ndev) 2448 { 2449 struct sh_eth_private *mdp = netdev_priv(ndev); 2450 u32 ecmr_bits; 2451 int mcast_all = 0; 2452 unsigned long flags; 2453 2454 spin_lock_irqsave(&mdp->lock, flags); 2455 /* Initial condition is MCT = 1, PRM = 0. 2456 * Depending on ndev->flags, set PRM or clear MCT 2457 */ 2458 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; 2459 2460 if (!(ndev->flags & IFF_MULTICAST)) { 2461 sh_eth_tsu_purge_mcast(ndev); 2462 mcast_all = 1; 2463 } 2464 if (ndev->flags & IFF_ALLMULTI) { 2465 sh_eth_tsu_purge_mcast(ndev); 2466 ecmr_bits &= ~ECMR_MCT; 2467 mcast_all = 1; 2468 } 2469 2470 if (ndev->flags & IFF_PROMISC) { 2471 sh_eth_tsu_purge_all(ndev); 2472 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM; 2473 } else if (mdp->cd->tsu) { 2474 struct netdev_hw_addr *ha; 2475 netdev_for_each_mc_addr(ha, ndev) { 2476 if (mcast_all && is_multicast_ether_addr(ha->addr)) 2477 continue; 2478 2479 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { 2480 if (!mcast_all) { 2481 sh_eth_tsu_purge_mcast(ndev); 2482 ecmr_bits &= ~ECMR_MCT; 2483 mcast_all = 1; 2484 } 2485 } 2486 } 2487 } else { 2488 /* Normal, unicast/broadcast-only mode. */ 2489 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; 2490 } 2491 2492 /* update the ethernet mode */ 2493 sh_eth_write(ndev, ecmr_bits, ECMR); 2494 2495 spin_unlock_irqrestore(&mdp->lock, flags); 2496 } 2497 2498 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) 2499 { 2500 if (!mdp->port) 2501 return TSU_VTAG0; 2502 else 2503 return TSU_VTAG1; 2504 } 2505 2506 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, 2507 __be16 proto, u16 vid) 2508 { 2509 struct sh_eth_private *mdp = netdev_priv(ndev); 2510 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2511 2512 if (unlikely(!mdp->cd->tsu)) 2513 return -EPERM; 2514 2515 /* No filtering if vid = 0 */ 2516 if (!vid) 2517 return 0; 2518 2519 mdp->vlan_num_ids++; 2520 2521 /* The controller has one VLAN tag HW filter. So, if the filter is 2522 * already enabled, the driver disables it and the filte 2523 */ 2524 if (mdp->vlan_num_ids > 1) { 2525 /* disable VLAN filter */ 2526 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 2527 return 0; 2528 } 2529 2530 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), 2531 vtag_reg_index); 2532 2533 return 0; 2534 } 2535 2536 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, 2537 __be16 proto, u16 vid) 2538 { 2539 struct sh_eth_private *mdp = netdev_priv(ndev); 2540 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2541 2542 if (unlikely(!mdp->cd->tsu)) 2543 return -EPERM; 2544 2545 /* No filtering if vid = 0 */ 2546 if (!vid) 2547 return 0; 2548 2549 mdp->vlan_num_ids--; 2550 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 2551 2552 return 0; 2553 } 2554 2555 /* SuperH's TSU register init function */ 2556 static void sh_eth_tsu_init(struct sh_eth_private *mdp) 2557 { 2558 if (sh_eth_is_rz_fast_ether(mdp)) { 2559 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2560 return; 2561 } 2562 2563 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ 2564 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ 2565 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ 2566 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); 2567 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); 2568 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); 2569 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); 2570 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); 2571 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); 2572 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); 2573 if (sh_eth_is_gether(mdp)) { 2574 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ 2575 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ 2576 } else { 2577 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ 2578 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ 2579 } 2580 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ 2581 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ 2582 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2583 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ 2584 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ 2585 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ 2586 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ 2587 } 2588 2589 /* MDIO bus release function */ 2590 static int sh_mdio_release(struct sh_eth_private *mdp) 2591 { 2592 /* unregister mdio bus */ 2593 mdiobus_unregister(mdp->mii_bus); 2594 2595 /* free bitbang info */ 2596 free_mdio_bitbang(mdp->mii_bus); 2597 2598 return 0; 2599 } 2600 2601 /* MDIO bus init function */ 2602 static int sh_mdio_init(struct sh_eth_private *mdp, 2603 struct sh_eth_plat_data *pd) 2604 { 2605 int ret, i; 2606 struct bb_info *bitbang; 2607 struct platform_device *pdev = mdp->pdev; 2608 struct device *dev = &mdp->pdev->dev; 2609 2610 /* create bit control struct for PHY */ 2611 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); 2612 if (!bitbang) 2613 return -ENOMEM; 2614 2615 /* bitbang init */ 2616 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2617 bitbang->set_gate = pd->set_mdio_gate; 2618 bitbang->mdi_msk = PIR_MDI; 2619 bitbang->mdo_msk = PIR_MDO; 2620 bitbang->mmd_msk = PIR_MMD; 2621 bitbang->mdc_msk = PIR_MDC; 2622 bitbang->ctrl.ops = &bb_ops; 2623 2624 /* MII controller setting */ 2625 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2626 if (!mdp->mii_bus) 2627 return -ENOMEM; 2628 2629 /* Hook up MII support for ethtool */ 2630 mdp->mii_bus->name = "sh_mii"; 2631 mdp->mii_bus->parent = dev; 2632 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2633 pdev->name, pdev->id); 2634 2635 /* PHY IRQ */ 2636 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), 2637 GFP_KERNEL); 2638 if (!mdp->mii_bus->irq) { 2639 ret = -ENOMEM; 2640 goto out_free_bus; 2641 } 2642 2643 /* register MDIO bus */ 2644 if (dev->of_node) { 2645 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); 2646 } else { 2647 for (i = 0; i < PHY_MAX_ADDR; i++) 2648 mdp->mii_bus->irq[i] = PHY_POLL; 2649 if (pd->phy_irq > 0) 2650 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; 2651 2652 ret = mdiobus_register(mdp->mii_bus); 2653 } 2654 2655 if (ret) 2656 goto out_free_bus; 2657 2658 return 0; 2659 2660 out_free_bus: 2661 free_mdio_bitbang(mdp->mii_bus); 2662 return ret; 2663 } 2664 2665 static const u16 *sh_eth_get_register_offset(int register_type) 2666 { 2667 const u16 *reg_offset = NULL; 2668 2669 switch (register_type) { 2670 case SH_ETH_REG_GIGABIT: 2671 reg_offset = sh_eth_offset_gigabit; 2672 break; 2673 case SH_ETH_REG_FAST_RZ: 2674 reg_offset = sh_eth_offset_fast_rz; 2675 break; 2676 case SH_ETH_REG_FAST_RCAR: 2677 reg_offset = sh_eth_offset_fast_rcar; 2678 break; 2679 case SH_ETH_REG_FAST_SH4: 2680 reg_offset = sh_eth_offset_fast_sh4; 2681 break; 2682 case SH_ETH_REG_FAST_SH3_SH2: 2683 reg_offset = sh_eth_offset_fast_sh3_sh2; 2684 break; 2685 default: 2686 break; 2687 } 2688 2689 return reg_offset; 2690 } 2691 2692 static const struct net_device_ops sh_eth_netdev_ops = { 2693 .ndo_open = sh_eth_open, 2694 .ndo_stop = sh_eth_close, 2695 .ndo_start_xmit = sh_eth_start_xmit, 2696 .ndo_get_stats = sh_eth_get_stats, 2697 .ndo_tx_timeout = sh_eth_tx_timeout, 2698 .ndo_do_ioctl = sh_eth_do_ioctl, 2699 .ndo_validate_addr = eth_validate_addr, 2700 .ndo_set_mac_address = eth_mac_addr, 2701 .ndo_change_mtu = eth_change_mtu, 2702 }; 2703 2704 static const struct net_device_ops sh_eth_netdev_ops_tsu = { 2705 .ndo_open = sh_eth_open, 2706 .ndo_stop = sh_eth_close, 2707 .ndo_start_xmit = sh_eth_start_xmit, 2708 .ndo_get_stats = sh_eth_get_stats, 2709 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2710 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, 2711 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, 2712 .ndo_tx_timeout = sh_eth_tx_timeout, 2713 .ndo_do_ioctl = sh_eth_do_ioctl, 2714 .ndo_validate_addr = eth_validate_addr, 2715 .ndo_set_mac_address = eth_mac_addr, 2716 .ndo_change_mtu = eth_change_mtu, 2717 }; 2718 2719 #ifdef CONFIG_OF 2720 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) 2721 { 2722 struct device_node *np = dev->of_node; 2723 struct sh_eth_plat_data *pdata; 2724 const char *mac_addr; 2725 2726 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 2727 if (!pdata) 2728 return NULL; 2729 2730 pdata->phy_interface = of_get_phy_mode(np); 2731 2732 mac_addr = of_get_mac_address(np); 2733 if (mac_addr) 2734 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); 2735 2736 pdata->no_ether_link = 2737 of_property_read_bool(np, "renesas,no-ether-link"); 2738 pdata->ether_link_active_low = 2739 of_property_read_bool(np, "renesas,ether-link-active-low"); 2740 2741 return pdata; 2742 } 2743 2744 static const struct of_device_id sh_eth_match_table[] = { 2745 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, 2746 { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data }, 2747 { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, 2748 { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, 2749 { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, 2750 { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data }, 2751 { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data }, 2752 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, 2753 { } 2754 }; 2755 MODULE_DEVICE_TABLE(of, sh_eth_match_table); 2756 #else 2757 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) 2758 { 2759 return NULL; 2760 } 2761 #endif 2762 2763 static int sh_eth_drv_probe(struct platform_device *pdev) 2764 { 2765 int ret, devno = 0; 2766 struct resource *res; 2767 struct net_device *ndev = NULL; 2768 struct sh_eth_private *mdp = NULL; 2769 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); 2770 const struct platform_device_id *id = platform_get_device_id(pdev); 2771 2772 /* get base addr */ 2773 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2774 2775 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2776 if (!ndev) 2777 return -ENOMEM; 2778 2779 pm_runtime_enable(&pdev->dev); 2780 pm_runtime_get_sync(&pdev->dev); 2781 2782 devno = pdev->id; 2783 if (devno < 0) 2784 devno = 0; 2785 2786 ndev->dma = -1; 2787 ret = platform_get_irq(pdev, 0); 2788 if (ret < 0) { 2789 ret = -ENODEV; 2790 goto out_release; 2791 } 2792 ndev->irq = ret; 2793 2794 SET_NETDEV_DEV(ndev, &pdev->dev); 2795 2796 mdp = netdev_priv(ndev); 2797 mdp->num_tx_ring = TX_RING_SIZE; 2798 mdp->num_rx_ring = RX_RING_SIZE; 2799 mdp->addr = devm_ioremap_resource(&pdev->dev, res); 2800 if (IS_ERR(mdp->addr)) { 2801 ret = PTR_ERR(mdp->addr); 2802 goto out_release; 2803 } 2804 2805 ndev->base_addr = res->start; 2806 2807 spin_lock_init(&mdp->lock); 2808 mdp->pdev = pdev; 2809 2810 if (pdev->dev.of_node) 2811 pd = sh_eth_parse_dt(&pdev->dev); 2812 if (!pd) { 2813 dev_err(&pdev->dev, "no platform data\n"); 2814 ret = -EINVAL; 2815 goto out_release; 2816 } 2817 2818 /* get PHY ID */ 2819 mdp->phy_id = pd->phy; 2820 mdp->phy_interface = pd->phy_interface; 2821 /* EDMAC endian */ 2822 mdp->edmac_endian = pd->edmac_endian; 2823 mdp->no_ether_link = pd->no_ether_link; 2824 mdp->ether_link_active_low = pd->ether_link_active_low; 2825 2826 /* set cpu data */ 2827 if (id) { 2828 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 2829 } else { 2830 const struct of_device_id *match; 2831 2832 match = of_match_device(of_match_ptr(sh_eth_match_table), 2833 &pdev->dev); 2834 mdp->cd = (struct sh_eth_cpu_data *)match->data; 2835 } 2836 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); 2837 if (!mdp->reg_offset) { 2838 dev_err(&pdev->dev, "Unknown register type (%d)\n", 2839 mdp->cd->register_type); 2840 ret = -EINVAL; 2841 goto out_release; 2842 } 2843 sh_eth_set_default_cpu_data(mdp->cd); 2844 2845 /* set function */ 2846 if (mdp->cd->tsu) 2847 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; 2848 else 2849 ndev->netdev_ops = &sh_eth_netdev_ops; 2850 ndev->ethtool_ops = &sh_eth_ethtool_ops; 2851 ndev->watchdog_timeo = TX_TIMEOUT; 2852 2853 /* debug message level */ 2854 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2855 2856 /* read and set MAC address */ 2857 read_mac_address(ndev, pd->mac_addr); 2858 if (!is_valid_ether_addr(ndev->dev_addr)) { 2859 dev_warn(&pdev->dev, 2860 "no valid MAC address supplied, using a random one.\n"); 2861 eth_hw_addr_random(ndev); 2862 } 2863 2864 /* ioremap the TSU registers */ 2865 if (mdp->cd->tsu) { 2866 struct resource *rtsu; 2867 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2868 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 2869 if (IS_ERR(mdp->tsu_addr)) { 2870 ret = PTR_ERR(mdp->tsu_addr); 2871 goto out_release; 2872 } 2873 mdp->port = devno % 2; 2874 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; 2875 } 2876 2877 /* initialize first or needed device */ 2878 if (!devno || pd->needs_init) { 2879 if (mdp->cd->chip_reset) 2880 mdp->cd->chip_reset(ndev); 2881 2882 if (mdp->cd->tsu) { 2883 /* TSU init (Init only)*/ 2884 sh_eth_tsu_init(mdp); 2885 } 2886 } 2887 2888 if (mdp->cd->rmiimode) 2889 sh_eth_write(ndev, 0x1, RMIIMODE); 2890 2891 /* MDIO bus init */ 2892 ret = sh_mdio_init(mdp, pd); 2893 if (ret) { 2894 dev_err(&ndev->dev, "failed to initialise MDIO\n"); 2895 goto out_release; 2896 } 2897 2898 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); 2899 2900 /* network device register */ 2901 ret = register_netdev(ndev); 2902 if (ret) 2903 goto out_napi_del; 2904 2905 /* print device information */ 2906 netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", 2907 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2908 2909 pm_runtime_put(&pdev->dev); 2910 platform_set_drvdata(pdev, ndev); 2911 2912 return ret; 2913 2914 out_napi_del: 2915 netif_napi_del(&mdp->napi); 2916 sh_mdio_release(mdp); 2917 2918 out_release: 2919 /* net_dev free */ 2920 if (ndev) 2921 free_netdev(ndev); 2922 2923 pm_runtime_put(&pdev->dev); 2924 pm_runtime_disable(&pdev->dev); 2925 return ret; 2926 } 2927 2928 static int sh_eth_drv_remove(struct platform_device *pdev) 2929 { 2930 struct net_device *ndev = platform_get_drvdata(pdev); 2931 struct sh_eth_private *mdp = netdev_priv(ndev); 2932 2933 unregister_netdev(ndev); 2934 netif_napi_del(&mdp->napi); 2935 sh_mdio_release(mdp); 2936 pm_runtime_disable(&pdev->dev); 2937 free_netdev(ndev); 2938 2939 return 0; 2940 } 2941 2942 #ifdef CONFIG_PM 2943 static int sh_eth_runtime_nop(struct device *dev) 2944 { 2945 /* Runtime PM callback shared between ->runtime_suspend() 2946 * and ->runtime_resume(). Simply returns success. 2947 * 2948 * This driver re-initializes all registers after 2949 * pm_runtime_get_sync() anyway so there is no need 2950 * to save and restore registers here. 2951 */ 2952 return 0; 2953 } 2954 2955 static const struct dev_pm_ops sh_eth_dev_pm_ops = { 2956 .runtime_suspend = sh_eth_runtime_nop, 2957 .runtime_resume = sh_eth_runtime_nop, 2958 }; 2959 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) 2960 #else 2961 #define SH_ETH_PM_OPS NULL 2962 #endif 2963 2964 static struct platform_device_id sh_eth_id_table[] = { 2965 { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, 2966 { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, 2967 { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, 2968 { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, 2969 { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, 2970 { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, 2971 { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, 2972 { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data }, 2973 { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, 2974 { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, 2975 { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, 2976 { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, 2977 { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, 2978 { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, 2979 { } 2980 }; 2981 MODULE_DEVICE_TABLE(platform, sh_eth_id_table); 2982 2983 static struct platform_driver sh_eth_driver = { 2984 .probe = sh_eth_drv_probe, 2985 .remove = sh_eth_drv_remove, 2986 .id_table = sh_eth_id_table, 2987 .driver = { 2988 .name = CARDNAME, 2989 .pm = SH_ETH_PM_OPS, 2990 .of_match_table = of_match_ptr(sh_eth_match_table), 2991 }, 2992 }; 2993 2994 module_platform_driver(sh_eth_driver); 2995 2996 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); 2997 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); 2998 MODULE_LICENSE("GPL v2"); 2999