1 /*  SuperH Ethernet device driver
2  *
3  *  Copyright (C) 2014  Renesas Electronics Corporation
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
6  *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
7  *  Copyright (C) 2014 Codethink Limited
8  *
9  *  This program is free software; you can redistribute it and/or modify it
10  *  under the terms and conditions of the GNU General Public License,
11  *  version 2, as published by the Free Software Foundation.
12  *
13  *  This program is distributed in the hope it will be useful, but WITHOUT
14  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  *  more details.
17  *
18  *  The full GNU General Public License is included in this distribution in
19  *  the file called "COPYING".
20  */
21 
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/etherdevice.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/mdio-bitbang.h>
31 #include <linux/netdevice.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_net.h>
36 #include <linux/phy.h>
37 #include <linux/cache.h>
38 #include <linux/io.h>
39 #include <linux/pm_runtime.h>
40 #include <linux/slab.h>
41 #include <linux/ethtool.h>
42 #include <linux/if_vlan.h>
43 #include <linux/clk.h>
44 #include <linux/sh_eth.h>
45 #include <linux/of_mdio.h>
46 
47 #include "sh_eth.h"
48 
49 #define SH_ETH_DEF_MSG_ENABLE \
50 		(NETIF_MSG_LINK	| \
51 		NETIF_MSG_TIMER	| \
52 		NETIF_MSG_RX_ERR| \
53 		NETIF_MSG_TX_ERR)
54 
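/* Register offset tables
 *
 * The driver addresses the controller registers through a common enum
 * index; these per-SoC tables translate that index into the actual MMIO
 * offset.  The table in use is selected according to the register_type
 * field of the matching sh_eth_cpu_data.
 */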
55 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
56 	[EDSR]		= 0x0000,
57 	[EDMR]		= 0x0400,
58 	[EDTRR]		= 0x0408,
59 	[EDRRR]		= 0x0410,
60 	[EESR]		= 0x0428,
61 	[EESIPR]	= 0x0430,
62 	[TDLAR]		= 0x0010,
63 	[TDFAR]		= 0x0014,
64 	[TDFXR]		= 0x0018,
65 	[TDFFR]		= 0x001c,
66 	[RDLAR]		= 0x0030,
67 	[RDFAR]		= 0x0034,
68 	[RDFXR]		= 0x0038,
69 	[RDFFR]		= 0x003c,
70 	[TRSCER]	= 0x0438,
71 	[RMFCR]		= 0x0440,
72 	[TFTR]		= 0x0448,
73 	[FDR]		= 0x0450,
74 	[RMCR]		= 0x0458,
75 	[RPADIR]	= 0x0460,
76 	[FCFTR]		= 0x0468,
77 	[CSMR]		= 0x04E4,
78 
79 	[ECMR]		= 0x0500,
80 	[ECSR]		= 0x0510,
81 	[ECSIPR]	= 0x0518,
82 	[PIR]		= 0x0520,
83 	[PSR]		= 0x0528,
84 	[PIPR]		= 0x052c,
85 	[RFLR]		= 0x0508,
86 	[APR]		= 0x0554,
87 	[MPR]		= 0x0558,
88 	[PFTCR]		= 0x055c,
89 	[PFRCR]		= 0x0560,
90 	[TPAUSER]	= 0x0564,
91 	[GECMR]		= 0x05b0,
92 	[BCULR]		= 0x05b4,
93 	[MAHR]		= 0x05c0,
94 	[MALR]		= 0x05c8,
95 	[TROCR]		= 0x0700,
96 	[CDCR]		= 0x0708,
97 	[LCCR]		= 0x0710,
98 	[CEFCR]		= 0x0740,
99 	[FRECR]		= 0x0748,
100 	[TSFRCR]	= 0x0750,
101 	[TLFRCR]	= 0x0758,
102 	[RFCR]		= 0x0760,
103 	[CERCR]		= 0x0768,
104 	[CEECR]		= 0x0770,
105 	[MAFCR]		= 0x0778,
106 	[RMII_MII]	= 0x0790,
107 
108 	[ARSTR]		= 0x0000,
109 	[TSU_CTRST]	= 0x0004,
110 	[TSU_FWEN0]	= 0x0010,
111 	[TSU_FWEN1]	= 0x0014,
112 	[TSU_FCM]	= 0x0018,
113 	[TSU_BSYSL0]	= 0x0020,
114 	[TSU_BSYSL1]	= 0x0024,
115 	[TSU_PRISL0]	= 0x0028,
116 	[TSU_PRISL1]	= 0x002c,
117 	[TSU_FWSL0]	= 0x0030,
118 	[TSU_FWSL1]	= 0x0034,
119 	[TSU_FWSLC]	= 0x0038,
120 	[TSU_QTAG0]	= 0x0040,
121 	[TSU_QTAG1]	= 0x0044,
122 	[TSU_FWSR]	= 0x0050,
123 	[TSU_FWINMK]	= 0x0054,
124 	[TSU_ADQT0]	= 0x0048,
125 	[TSU_ADQT1]	= 0x004c,
126 	[TSU_VTAG0]	= 0x0058,
127 	[TSU_VTAG1]	= 0x005c,
128 	[TSU_ADSBSY]	= 0x0060,
129 	[TSU_TEN]	= 0x0064,
130 	[TSU_POST1]	= 0x0070,
131 	[TSU_POST2]	= 0x0074,
132 	[TSU_POST3]	= 0x0078,
133 	[TSU_POST4]	= 0x007c,
134 	[TSU_ADRH0]	= 0x0100,
135 	[TSU_ADRL0]	= 0x0104,
136 	[TSU_ADRH31]	= 0x01f8,
137 	[TSU_ADRL31]	= 0x01fc,
138 
139 	[TXNLCR0]	= 0x0080,
140 	[TXALCR0]	= 0x0084,
141 	[RXNLCR0]	= 0x0088,
142 	[RXALCR0]	= 0x008c,
143 	[FWNLCR0]	= 0x0090,
144 	[FWALCR0]	= 0x0094,
145 	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
147 	[RXNLCR1]	= 0x00a8,
148 	[RXALCR1]	= 0x00ac,
149 	[FWNLCR1]	= 0x00b0,
150 	[FWALCR1]	= 0x00b4,
151 };
152 
153 static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
154 	[EDSR]		= 0x0000,
155 	[EDMR]		= 0x0400,
156 	[EDTRR]		= 0x0408,
157 	[EDRRR]		= 0x0410,
158 	[EESR]		= 0x0428,
159 	[EESIPR]	= 0x0430,
160 	[TDLAR]		= 0x0010,
161 	[TDFAR]		= 0x0014,
162 	[TDFXR]		= 0x0018,
163 	[TDFFR]		= 0x001c,
164 	[RDLAR]		= 0x0030,
165 	[RDFAR]		= 0x0034,
166 	[RDFXR]		= 0x0038,
167 	[RDFFR]		= 0x003c,
168 	[TRSCER]	= 0x0438,
169 	[RMFCR]		= 0x0440,
170 	[TFTR]		= 0x0448,
171 	[FDR]		= 0x0450,
172 	[RMCR]		= 0x0458,
173 	[RPADIR]	= 0x0460,
174 	[FCFTR]		= 0x0468,
175 	[CSMR]		= 0x04E4,
176 
177 	[ECMR]		= 0x0500,
178 	[RFLR]		= 0x0508,
179 	[ECSR]		= 0x0510,
180 	[ECSIPR]	= 0x0518,
181 	[PIR]		= 0x0520,
182 	[APR]		= 0x0554,
183 	[MPR]		= 0x0558,
184 	[PFTCR]		= 0x055c,
185 	[PFRCR]		= 0x0560,
186 	[TPAUSER]	= 0x0564,
187 	[MAHR]		= 0x05c0,
188 	[MALR]		= 0x05c8,
189 	[CEFCR]		= 0x0740,
190 	[FRECR]		= 0x0748,
191 	[TSFRCR]	= 0x0750,
192 	[TLFRCR]	= 0x0758,
193 	[RFCR]		= 0x0760,
194 	[MAFCR]		= 0x0778,
195 
196 	[ARSTR]		= 0x0000,
197 	[TSU_CTRST]	= 0x0004,
198 	[TSU_VTAG0]	= 0x0058,
199 	[TSU_ADSBSY]	= 0x0060,
200 	[TSU_TEN]	= 0x0064,
201 	[TSU_ADRH0]	= 0x0100,
202 	[TSU_ADRL0]	= 0x0104,
203 	[TSU_ADRH31]	= 0x01f8,
204 	[TSU_ADRL31]	= 0x01fc,
205 
206 	[TXNLCR0]	= 0x0080,
207 	[TXALCR0]	= 0x0084,
208 	[RXNLCR0]	= 0x0088,
209 	[RXALCR0]	= 0x008C,
210 };
211 
212 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
213 	[ECMR]		= 0x0300,
214 	[RFLR]		= 0x0308,
215 	[ECSR]		= 0x0310,
216 	[ECSIPR]	= 0x0318,
217 	[PIR]		= 0x0320,
218 	[PSR]		= 0x0328,
219 	[RDMLR]		= 0x0340,
220 	[IPGR]		= 0x0350,
221 	[APR]		= 0x0354,
222 	[MPR]		= 0x0358,
223 	[RFCF]		= 0x0360,
224 	[TPAUSER]	= 0x0364,
225 	[TPAUSECR]	= 0x0368,
226 	[MAHR]		= 0x03c0,
227 	[MALR]		= 0x03c8,
228 	[TROCR]		= 0x03d0,
229 	[CDCR]		= 0x03d4,
230 	[LCCR]		= 0x03d8,
231 	[CNDCR]		= 0x03dc,
232 	[CEFCR]		= 0x03e4,
233 	[FRECR]		= 0x03e8,
234 	[TSFRCR]	= 0x03ec,
235 	[TLFRCR]	= 0x03f0,
236 	[RFCR]		= 0x03f4,
237 	[MAFCR]		= 0x03f8,
238 
239 	[EDMR]		= 0x0200,
240 	[EDTRR]		= 0x0208,
241 	[EDRRR]		= 0x0210,
242 	[TDLAR]		= 0x0218,
243 	[RDLAR]		= 0x0220,
244 	[EESR]		= 0x0228,
245 	[EESIPR]	= 0x0230,
246 	[TRSCER]	= 0x0238,
247 	[RMFCR]		= 0x0240,
248 	[TFTR]		= 0x0248,
249 	[FDR]		= 0x0250,
250 	[RMCR]		= 0x0258,
251 	[TFUCR]		= 0x0264,
252 	[RFOCR]		= 0x0268,
253 	[RMIIMODE]      = 0x026c,
254 	[FCFTR]		= 0x0270,
255 	[TRIMD]		= 0x027c,
256 };
257 
258 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
259 	[ECMR]		= 0x0100,
260 	[RFLR]		= 0x0108,
261 	[ECSR]		= 0x0110,
262 	[ECSIPR]	= 0x0118,
263 	[PIR]		= 0x0120,
264 	[PSR]		= 0x0128,
265 	[RDMLR]		= 0x0140,
266 	[IPGR]		= 0x0150,
267 	[APR]		= 0x0154,
268 	[MPR]		= 0x0158,
269 	[TPAUSER]	= 0x0164,
270 	[RFCF]		= 0x0160,
271 	[TPAUSECR]	= 0x0168,
272 	[BCFRR]		= 0x016c,
273 	[MAHR]		= 0x01c0,
274 	[MALR]		= 0x01c8,
275 	[TROCR]		= 0x01d0,
276 	[CDCR]		= 0x01d4,
277 	[LCCR]		= 0x01d8,
278 	[CNDCR]		= 0x01dc,
279 	[CEFCR]		= 0x01e4,
280 	[FRECR]		= 0x01e8,
281 	[TSFRCR]	= 0x01ec,
282 	[TLFRCR]	= 0x01f0,
283 	[RFCR]		= 0x01f4,
284 	[MAFCR]		= 0x01f8,
285 	[RTRATE]	= 0x01fc,
286 
287 	[EDMR]		= 0x0000,
288 	[EDTRR]		= 0x0008,
289 	[EDRRR]		= 0x0010,
290 	[TDLAR]		= 0x0018,
291 	[RDLAR]		= 0x0020,
292 	[EESR]		= 0x0028,
293 	[EESIPR]	= 0x0030,
294 	[TRSCER]	= 0x0038,
295 	[RMFCR]		= 0x0040,
296 	[TFTR]		= 0x0048,
297 	[FDR]		= 0x0050,
298 	[RMCR]		= 0x0058,
299 	[TFUCR]		= 0x0064,
300 	[RFOCR]		= 0x0068,
301 	[FCFTR]		= 0x0070,
302 	[RPADIR]	= 0x0078,
303 	[TRIMD]		= 0x007c,
304 	[RBWAR]		= 0x00c8,
305 	[RDFAR]		= 0x00cc,
306 	[TBRAR]		= 0x00d4,
307 	[TDFAR]		= 0x00d8,
308 };
309 
310 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
311 	[EDMR]		= 0x0000,
312 	[EDTRR]		= 0x0004,
313 	[EDRRR]		= 0x0008,
314 	[TDLAR]		= 0x000c,
315 	[RDLAR]		= 0x0010,
316 	[EESR]		= 0x0014,
317 	[EESIPR]	= 0x0018,
318 	[TRSCER]	= 0x001c,
319 	[RMFCR]		= 0x0020,
320 	[TFTR]		= 0x0024,
321 	[FDR]		= 0x0028,
322 	[RMCR]		= 0x002c,
323 	[EDOCR]		= 0x0030,
324 	[FCFTR]		= 0x0034,
325 	[RPADIR]	= 0x0038,
326 	[TRIMD]		= 0x003c,
327 	[RBWAR]		= 0x0040,
328 	[RDFAR]		= 0x0044,
329 	[TBRAR]		= 0x004c,
330 	[TDFAR]		= 0x0050,
331 
332 	[ECMR]		= 0x0160,
333 	[ECSR]		= 0x0164,
334 	[ECSIPR]	= 0x0168,
335 	[PIR]		= 0x016c,
336 	[MAHR]		= 0x0170,
337 	[MALR]		= 0x0174,
338 	[RFLR]		= 0x0178,
339 	[PSR]		= 0x017c,
340 	[TROCR]		= 0x0180,
341 	[CDCR]		= 0x0184,
342 	[LCCR]		= 0x0188,
343 	[CNDCR]		= 0x018c,
344 	[CEFCR]		= 0x0194,
345 	[FRECR]		= 0x0198,
346 	[TSFRCR]	= 0x019c,
347 	[TLFRCR]	= 0x01a0,
348 	[RFCR]		= 0x01a4,
349 	[MAFCR]		= 0x01a8,
350 	[IPGR]		= 0x01b4,
351 	[APR]		= 0x01b8,
352 	[MPR]		= 0x01bc,
353 	[TPAUSER]	= 0x01c4,
354 	[BCFR]		= 0x01cc,
355 
356 	[ARSTR]		= 0x0000,
357 	[TSU_CTRST]	= 0x0004,
358 	[TSU_FWEN0]	= 0x0010,
359 	[TSU_FWEN1]	= 0x0014,
360 	[TSU_FCM]	= 0x0018,
361 	[TSU_BSYSL0]	= 0x0020,
362 	[TSU_BSYSL1]	= 0x0024,
363 	[TSU_PRISL0]	= 0x0028,
364 	[TSU_PRISL1]	= 0x002c,
365 	[TSU_FWSL0]	= 0x0030,
366 	[TSU_FWSL1]	= 0x0034,
367 	[TSU_FWSLC]	= 0x0038,
368 	[TSU_QTAGM0]	= 0x0040,
369 	[TSU_QTAGM1]	= 0x0044,
370 	[TSU_ADQT0]	= 0x0048,
371 	[TSU_ADQT1]	= 0x004c,
372 	[TSU_FWSR]	= 0x0050,
373 	[TSU_FWINMK]	= 0x0054,
374 	[TSU_ADSBSY]	= 0x0060,
375 	[TSU_TEN]	= 0x0064,
376 	[TSU_POST1]	= 0x0070,
377 	[TSU_POST2]	= 0x0074,
378 	[TSU_POST3]	= 0x0078,
379 	[TSU_POST4]	= 0x007c,
380 
381 	[TXNLCR0]	= 0x0080,
382 	[TXALCR0]	= 0x0084,
383 	[RXNLCR0]	= 0x0088,
384 	[RXALCR0]	= 0x008c,
385 	[FWNLCR0]	= 0x0090,
386 	[FWALCR0]	= 0x0094,
387 	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
389 	[RXNLCR1]	= 0x00a8,
390 	[RXALCR1]	= 0x00ac,
391 	[FWNLCR1]	= 0x00b0,
392 	[FWALCR1]	= 0x00b4,
393 
394 	[TSU_ADRH0]	= 0x0100,
395 	[TSU_ADRL0]	= 0x0104,
396 	[TSU_ADRL31]	= 0x01fc,
397 };
398 
399 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
400 {
401 	return mdp->reg_offset == sh_eth_offset_gigabit;
402 }
403 
404 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
405 {
406 	return mdp->reg_offset == sh_eth_offset_fast_rz;
407 }
408 
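/* Program the PHY interface mode (GMII/MII/RMII) into the RMII_MII
 * register; only used on controllers that support MII mode selection
 * (cd->select_mii).
 */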
409 static void sh_eth_select_mii(struct net_device *ndev)
410 {
411 	u32 value = 0x0;
412 	struct sh_eth_private *mdp = netdev_priv(ndev);
413 
414 	switch (mdp->phy_interface) {
415 	case PHY_INTERFACE_MODE_GMII:
416 		value = 0x2;
417 		break;
418 	case PHY_INTERFACE_MODE_MII:
419 		value = 0x1;
420 		break;
421 	case PHY_INTERFACE_MODE_RMII:
422 		value = 0x0;
423 		break;
424 	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up. Defaulting to MII.\n");
427 		value = 0x1;
428 		break;
429 	}
430 
431 	sh_eth_write(ndev, value, RMII_MII);
432 }
433 
434 static void sh_eth_set_duplex(struct net_device *ndev)
435 {
436 	struct sh_eth_private *mdp = netdev_priv(ndev);
437 
438 	if (mdp->duplex) /* Full */
439 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
440 	else		/* Half */
441 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
442 }
443 
/* CPU-dependent code follows */
445 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
446 {
447 	struct sh_eth_private *mdp = netdev_priv(ndev);
448 
449 	switch (mdp->speed) {
450 	case 10: /* 10BASE */
451 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
452 		break;
453 	case 100:/* 100BASE */
454 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
455 		break;
456 	default:
457 		break;
458 	}
459 }
460 
461 /* R8A7778/9 */
462 static struct sh_eth_cpu_data r8a777x_data = {
463 	.set_duplex	= sh_eth_set_duplex,
464 	.set_rate	= sh_eth_set_rate_r8a777x,
465 
466 	.register_type	= SH_ETH_REG_FAST_RCAR,
467 
468 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
469 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
470 	.eesipr_value	= 0x01ff009f,
471 
472 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
473 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
474 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
475 			  EESR_ECI,
476 	.fdr_value	= 0x00000f0f,
477 
478 	.apr		= 1,
479 	.mpr		= 1,
480 	.tpauser	= 1,
481 	.hw_swap	= 1,
482 };
483 
484 /* R8A7790/1 */
485 static struct sh_eth_cpu_data r8a779x_data = {
486 	.set_duplex	= sh_eth_set_duplex,
487 	.set_rate	= sh_eth_set_rate_r8a777x,
488 
489 	.register_type	= SH_ETH_REG_FAST_RCAR,
490 
491 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
492 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
493 	.eesipr_value	= 0x01ff009f,
494 
495 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
496 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
497 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
498 			  EESR_ECI,
499 	.fdr_value	= 0x00000f0f,
500 
501 	.apr		= 1,
502 	.mpr		= 1,
503 	.tpauser	= 1,
504 	.hw_swap	= 1,
505 	.rmiimode	= 1,
506 	.shift_rd0	= 1,
507 };
508 
509 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
510 {
511 	struct sh_eth_private *mdp = netdev_priv(ndev);
512 
513 	switch (mdp->speed) {
514 	case 10: /* 10BASE */
515 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
516 		break;
517 	case 100:/* 100BASE */
518 		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
519 		break;
520 	default:
521 		break;
522 	}
523 }
524 
525 /* SH7724 */
526 static struct sh_eth_cpu_data sh7724_data = {
527 	.set_duplex	= sh_eth_set_duplex,
528 	.set_rate	= sh_eth_set_rate_sh7724,
529 
530 	.register_type	= SH_ETH_REG_FAST_SH4,
531 
532 	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
533 	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
534 	.eesipr_value	= 0x01ff009f,
535 
536 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
537 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
538 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
539 			  EESR_ECI,
540 
541 	.trscer_err_mask = DESC_I_RINT8,
542 
543 	.apr		= 1,
544 	.mpr		= 1,
545 	.tpauser	= 1,
546 	.hw_swap	= 1,
547 	.rpadir		= 1,
548 	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
549 };
550 
551 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
552 {
553 	struct sh_eth_private *mdp = netdev_priv(ndev);
554 
555 	switch (mdp->speed) {
556 	case 10: /* 10BASE */
557 		sh_eth_write(ndev, 0, RTRATE);
558 		break;
559 	case 100:/* 100BASE */
560 		sh_eth_write(ndev, 1, RTRATE);
561 		break;
562 	default:
563 		break;
564 	}
565 }
566 
567 /* SH7757 */
568 static struct sh_eth_cpu_data sh7757_data = {
569 	.set_duplex	= sh_eth_set_duplex,
570 	.set_rate	= sh_eth_set_rate_sh7757,
571 
572 	.register_type	= SH_ETH_REG_FAST_SH4,
573 
574 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
575 
576 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
577 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
578 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
579 			  EESR_ECI,
580 
581 	.irq_flags	= IRQF_SHARED,
582 	.apr		= 1,
583 	.mpr		= 1,
584 	.tpauser	= 1,
585 	.hw_swap	= 1,
586 	.no_ade		= 1,
587 	.rpadir		= 1,
588 	.rpadir_value   = 2 << 16,
589 };
590 
591 #define SH_GIGA_ETH_BASE	0xfee00000UL
592 #define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
593 #define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
594 static void sh_eth_chip_reset_giga(struct net_device *ndev)
595 {
596 	int i;
597 	unsigned long mahr[2], malr[2];
598 
599 	/* save MAHR and MALR */
600 	for (i = 0; i < 2; i++) {
601 		malr[i] = ioread32((void *)GIGA_MALR(i));
602 		mahr[i] = ioread32((void *)GIGA_MAHR(i));
603 	}
604 
605 	/* reset device */
606 	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
607 	mdelay(1);
608 
609 	/* restore MAHR and MALR */
610 	for (i = 0; i < 2; i++) {
611 		iowrite32(malr[i], (void *)GIGA_MALR(i));
612 		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
613 	}
614 }
615 
616 static void sh_eth_set_rate_giga(struct net_device *ndev)
617 {
618 	struct sh_eth_private *mdp = netdev_priv(ndev);
619 
620 	switch (mdp->speed) {
621 	case 10: /* 10BASE */
622 		sh_eth_write(ndev, 0x00000000, GECMR);
623 		break;
624 	case 100:/* 100BASE */
625 		sh_eth_write(ndev, 0x00000010, GECMR);
626 		break;
627 	case 1000: /* 1000BASE */
628 		sh_eth_write(ndev, 0x00000020, GECMR);
629 		break;
630 	default:
631 		break;
632 	}
633 }
634 
635 /* SH7757(GETHERC) */
636 static struct sh_eth_cpu_data sh7757_data_giga = {
637 	.chip_reset	= sh_eth_chip_reset_giga,
638 	.set_duplex	= sh_eth_set_duplex,
639 	.set_rate	= sh_eth_set_rate_giga,
640 
641 	.register_type	= SH_ETH_REG_GIGABIT,
642 
643 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
644 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
645 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
646 
647 	.tx_check	= EESR_TC1 | EESR_FTC,
648 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
649 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
650 			  EESR_TDE | EESR_ECI,
651 	.fdr_value	= 0x0000072f,
652 
653 	.irq_flags	= IRQF_SHARED,
654 	.apr		= 1,
655 	.mpr		= 1,
656 	.tpauser	= 1,
657 	.bculr		= 1,
658 	.hw_swap	= 1,
659 	.rpadir		= 1,
660 	.rpadir_value   = 2 << 16,
661 	.no_trimd	= 1,
662 	.no_ade		= 1,
663 	.tsu		= 1,
664 };
665 
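/* Assert the software reset bit in ARSTR (accessed through the TSU
 * register block) and give the hardware 1 ms to settle.
 */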
666 static void sh_eth_chip_reset(struct net_device *ndev)
667 {
668 	struct sh_eth_private *mdp = netdev_priv(ndev);
669 
670 	/* reset device */
671 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
672 	mdelay(1);
673 }
674 
675 static void sh_eth_set_rate_gether(struct net_device *ndev)
676 {
677 	struct sh_eth_private *mdp = netdev_priv(ndev);
678 
679 	switch (mdp->speed) {
680 	case 10: /* 10BASE */
681 		sh_eth_write(ndev, GECMR_10, GECMR);
682 		break;
683 	case 100:/* 100BASE */
684 		sh_eth_write(ndev, GECMR_100, GECMR);
685 		break;
686 	case 1000: /* 1000BASE */
687 		sh_eth_write(ndev, GECMR_1000, GECMR);
688 		break;
689 	default:
690 		break;
691 	}
692 }
693 
694 /* SH7734 */
695 static struct sh_eth_cpu_data sh7734_data = {
696 	.chip_reset	= sh_eth_chip_reset,
697 	.set_duplex	= sh_eth_set_duplex,
698 	.set_rate	= sh_eth_set_rate_gether,
699 
700 	.register_type	= SH_ETH_REG_GIGABIT,
701 
702 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
703 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
704 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
705 
706 	.tx_check	= EESR_TC1 | EESR_FTC,
707 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
708 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
709 			  EESR_TDE | EESR_ECI,
710 
711 	.apr		= 1,
712 	.mpr		= 1,
713 	.tpauser	= 1,
714 	.bculr		= 1,
715 	.hw_swap	= 1,
716 	.no_trimd	= 1,
717 	.no_ade		= 1,
718 	.tsu		= 1,
719 	.hw_crc		= 1,
720 	.select_mii	= 1,
721 };
722 
723 /* SH7763 */
724 static struct sh_eth_cpu_data sh7763_data = {
725 	.chip_reset	= sh_eth_chip_reset,
726 	.set_duplex	= sh_eth_set_duplex,
727 	.set_rate	= sh_eth_set_rate_gether,
728 
729 	.register_type	= SH_ETH_REG_GIGABIT,
730 
731 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
732 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
733 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
734 
735 	.tx_check	= EESR_TC1 | EESR_FTC,
736 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
737 			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
738 			  EESR_ECI,
739 
740 	.apr		= 1,
741 	.mpr		= 1,
742 	.tpauser	= 1,
743 	.bculr		= 1,
744 	.hw_swap	= 1,
745 	.no_trimd	= 1,
746 	.no_ade		= 1,
747 	.tsu		= 1,
748 	.irq_flags	= IRQF_SHARED,
749 };
750 
751 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
752 {
753 	struct sh_eth_private *mdp = netdev_priv(ndev);
754 
755 	/* reset device */
756 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
757 	mdelay(1);
758 
759 	sh_eth_select_mii(ndev);
760 }
761 
762 /* R8A7740 */
763 static struct sh_eth_cpu_data r8a7740_data = {
764 	.chip_reset	= sh_eth_chip_reset_r8a7740,
765 	.set_duplex	= sh_eth_set_duplex,
766 	.set_rate	= sh_eth_set_rate_gether,
767 
768 	.register_type	= SH_ETH_REG_GIGABIT,
769 
770 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
771 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
772 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
773 
774 	.tx_check	= EESR_TC1 | EESR_FTC,
775 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
776 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
777 			  EESR_TDE | EESR_ECI,
778 	.fdr_value	= 0x0000070f,
779 
780 	.apr		= 1,
781 	.mpr		= 1,
782 	.tpauser	= 1,
783 	.bculr		= 1,
784 	.hw_swap	= 1,
785 	.rpadir		= 1,
786 	.rpadir_value   = 2 << 16,
787 	.no_trimd	= 1,
788 	.no_ade		= 1,
789 	.tsu		= 1,
790 	.select_mii	= 1,
791 	.shift_rd0	= 1,
792 };
793 
794 /* R7S72100 */
795 static struct sh_eth_cpu_data r7s72100_data = {
796 	.chip_reset	= sh_eth_chip_reset,
797 	.set_duplex	= sh_eth_set_duplex,
798 
799 	.register_type	= SH_ETH_REG_FAST_RZ,
800 
801 	.ecsr_value	= ECSR_ICD,
802 	.ecsipr_value	= ECSIPR_ICDIP,
803 	.eesipr_value	= 0xff7f009f,
804 
805 	.tx_check	= EESR_TC1 | EESR_FTC,
806 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
807 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
808 			  EESR_TDE | EESR_ECI,
809 	.fdr_value	= 0x0000070f,
810 
811 	.no_psr		= 1,
812 	.apr		= 1,
813 	.mpr		= 1,
814 	.tpauser	= 1,
815 	.hw_swap	= 1,
816 	.rpadir		= 1,
817 	.rpadir_value   = 2 << 16,
818 	.no_trimd	= 1,
819 	.no_ade		= 1,
820 	.hw_crc		= 1,
821 	.tsu		= 1,
822 	.shift_rd0	= 1,
823 };
824 
825 static struct sh_eth_cpu_data sh7619_data = {
826 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
827 
828 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
829 
830 	.apr		= 1,
831 	.mpr		= 1,
832 	.tpauser	= 1,
833 	.hw_swap	= 1,
834 };
835 
836 static struct sh_eth_cpu_data sh771x_data = {
837 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
838 
839 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
840 	.tsu		= 1,
841 };
842 
843 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
844 {
845 	if (!cd->ecsr_value)
846 		cd->ecsr_value = DEFAULT_ECSR_INIT;
847 
848 	if (!cd->ecsipr_value)
849 		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
850 
851 	if (!cd->fcftr_value)
852 		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
853 				  DEFAULT_FIFO_F_D_RFD;
854 
855 	if (!cd->fdr_value)
856 		cd->fdr_value = DEFAULT_FDR_INIT;
857 
858 	if (!cd->tx_check)
859 		cd->tx_check = DEFAULT_TX_CHECK;
860 
861 	if (!cd->eesr_err_check)
862 		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
863 
864 	if (!cd->trscer_err_mask)
865 		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
866 }
867 
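/* Poll the EDMR software reset bits, waiting up to ~100 ms for the
 * hardware to clear them; return -ETIMEDOUT if the reset never completes.
 */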
868 static int sh_eth_check_reset(struct net_device *ndev)
869 {
870 	int ret = 0;
871 	int cnt = 100;
872 
873 	while (cnt > 0) {
874 		if (!(sh_eth_read(ndev, EDMR) & 0x3))
875 			break;
876 		mdelay(1);
877 		cnt--;
878 	}
879 	if (cnt <= 0) {
880 		netdev_err(ndev, "Device reset failed\n");
881 		ret = -ETIMEDOUT;
882 	}
883 	return ret;
884 }
885 
886 static int sh_eth_reset(struct net_device *ndev)
887 {
888 	struct sh_eth_private *mdp = netdev_priv(ndev);
889 	int ret = 0;
890 
891 	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
892 		sh_eth_write(ndev, EDSR_ENALL, EDSR);
893 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
894 			     EDMR);
895 
896 		ret = sh_eth_check_reset(ndev);
897 		if (ret)
898 			return ret;
899 
900 		/* Table Init */
901 		sh_eth_write(ndev, 0x0, TDLAR);
902 		sh_eth_write(ndev, 0x0, TDFAR);
903 		sh_eth_write(ndev, 0x0, TDFXR);
904 		sh_eth_write(ndev, 0x0, TDFFR);
905 		sh_eth_write(ndev, 0x0, RDLAR);
906 		sh_eth_write(ndev, 0x0, RDFAR);
907 		sh_eth_write(ndev, 0x0, RDFXR);
908 		sh_eth_write(ndev, 0x0, RDFFR);
909 
910 		/* Reset HW CRC register */
911 		if (mdp->cd->hw_crc)
912 			sh_eth_write(ndev, 0x0, CSMR);
913 
914 		/* Select MII mode */
915 		if (mdp->cd->select_mii)
916 			sh_eth_select_mii(ndev);
917 	} else {
918 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
919 			     EDMR);
920 		mdelay(3);
921 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
922 			     EDMR);
923 	}
924 
925 	return ret;
926 }
927 
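/* Reserve headroom so that skb->data sits on the SH_ETH_RX_ALIGN boundary
 * expected by the receive DMA.
 */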
928 static void sh_eth_set_receive_align(struct sk_buff *skb)
929 {
930 	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
931 
932 	if (reserve)
933 		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
934 }
935 
937 /* CPU <-> EDMAC endian convert */
938 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
939 {
940 	switch (mdp->edmac_endian) {
941 	case EDMAC_LITTLE_ENDIAN:
942 		return cpu_to_le32(x);
943 	case EDMAC_BIG_ENDIAN:
944 		return cpu_to_be32(x);
945 	}
946 	return x;
947 }
948 
949 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
950 {
951 	switch (mdp->edmac_endian) {
952 	case EDMAC_LITTLE_ENDIAN:
953 		return le32_to_cpu(x);
954 	case EDMAC_BIG_ENDIAN:
955 		return be32_to_cpu(x);
956 	}
957 	return x;
958 }
959 
960 /* Program the hardware MAC address from dev->dev_addr. */
961 static void update_mac_address(struct net_device *ndev)
962 {
963 	sh_eth_write(ndev,
964 		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
965 		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
966 	sh_eth_write(ndev,
967 		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
968 }
969 
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for the MAC address, so this
 * driver reuses the address programmed by the bootloader (U-Boot or
 * sh-ipl+g).  The MAC address must therefore be set up in the bootloader
 * before this device can be used.
 */
977 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
978 {
979 	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
980 		memcpy(ndev->dev_addr, mac, ETH_ALEN);
981 	} else {
982 		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
983 		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
984 		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
985 		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
986 		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
987 		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
988 	}
989 }
990 
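/* The EDTRR transmit request value differs between the GETHER/RZ and
 * Fast Ether variants of the controller.
 */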
991 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
992 {
993 	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
994 		return EDTRR_TRNS_GETHER;
995 	else
996 		return EDTRR_TRNS_ETHER;
997 }
998 
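/* Bit-banged MDIO bus state: addr points at the PHY interface register
 * (normally PIR) and the *_msk fields are the bit masks of the MDIO
 * direction (MMD), data out (MDO), data in (MDI) and clock (MDC) lines.
 */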
999 struct bb_info {
1000 	void (*set_gate)(void *addr);
1001 	struct mdiobb_ctrl ctrl;
1002 	void *addr;
	u32 mmd_msk;	/* MMD */
1004 	u32 mdo_msk;
1005 	u32 mdi_msk;
1006 	u32 mdc_msk;
1007 };
1008 
1009 /* PHY bit set */
1010 static void bb_set(void *addr, u32 msk)
1011 {
1012 	iowrite32(ioread32(addr) | msk, addr);
1013 }
1014 
1015 /* PHY bit clear */
1016 static void bb_clr(void *addr, u32 msk)
1017 {
1018 	iowrite32((ioread32(addr) & ~msk), addr);
1019 }
1020 
1021 /* PHY bit read */
1022 static int bb_read(void *addr, u32 msk)
1023 {
1024 	return (ioread32(addr) & msk) != 0;
1025 }
1026 
1027 /* Data I/O pin control */
1028 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1029 {
1030 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1031 
1032 	if (bitbang->set_gate)
1033 		bitbang->set_gate(bitbang->addr);
1034 
1035 	if (bit)
1036 		bb_set(bitbang->addr, bitbang->mmd_msk);
1037 	else
1038 		bb_clr(bitbang->addr, bitbang->mmd_msk);
1039 }
1040 
/* Set bit data */
1042 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1043 {
1044 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1045 
1046 	if (bitbang->set_gate)
1047 		bitbang->set_gate(bitbang->addr);
1048 
1049 	if (bit)
1050 		bb_set(bitbang->addr, bitbang->mdo_msk);
1051 	else
1052 		bb_clr(bitbang->addr, bitbang->mdo_msk);
1053 }
1054 
/* Get bit data */
1056 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1057 {
1058 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1059 
1060 	if (bitbang->set_gate)
1061 		bitbang->set_gate(bitbang->addr);
1062 
1063 	return bb_read(bitbang->addr, bitbang->mdi_msk);
1064 }
1065 
1066 /* MDC pin control */
1067 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1068 {
1069 	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1070 
1071 	if (bitbang->set_gate)
1072 		bitbang->set_gate(bitbang->addr);
1073 
1074 	if (bit)
1075 		bb_set(bitbang->addr, bitbang->mdc_msk);
1076 	else
1077 		bb_clr(bitbang->addr, bitbang->mdc_msk);
1078 }
1079 
1080 /* mdio bus control struct */
1081 static struct mdiobb_ops bb_ops = {
1082 	.owner = THIS_MODULE,
1083 	.set_mdc = sh_mdc_ctrl,
1084 	.set_mdio_dir = sh_mmd_ctrl,
1085 	.set_mdio_data = sh_set_mdio,
1086 	.get_mdio_data = sh_get_mdio,
1087 };
1088 
/* Free the Rx/Tx skb ring buffers and their tracking arrays */
1090 static void sh_eth_ring_free(struct net_device *ndev)
1091 {
1092 	struct sh_eth_private *mdp = netdev_priv(ndev);
1093 	int i;
1094 
1095 	/* Free Rx skb ringbuffer */
1096 	if (mdp->rx_skbuff) {
1097 		for (i = 0; i < mdp->num_rx_ring; i++)
1098 			dev_kfree_skb(mdp->rx_skbuff[i]);
1099 	}
1100 	kfree(mdp->rx_skbuff);
1101 	mdp->rx_skbuff = NULL;
1102 
1103 	/* Free Tx skb ringbuffer */
1104 	if (mdp->tx_skbuff) {
1105 		for (i = 0; i < mdp->num_tx_ring; i++)
1106 			dev_kfree_skb(mdp->tx_skbuff[i]);
1107 	}
1108 	kfree(mdp->tx_skbuff);
1109 	mdp->tx_skbuff = NULL;
1110 }
1111 
/* Populate the Rx/Tx descriptor rings and allocate the Rx skbs */
1113 static void sh_eth_ring_format(struct net_device *ndev)
1114 {
1115 	struct sh_eth_private *mdp = netdev_priv(ndev);
1116 	int i;
1117 	struct sk_buff *skb;
1118 	struct sh_eth_rxdesc *rxdesc = NULL;
1119 	struct sh_eth_txdesc *txdesc = NULL;
1120 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1121 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1122 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1123 
1124 	mdp->cur_rx = 0;
1125 	mdp->cur_tx = 0;
1126 	mdp->dirty_rx = 0;
1127 	mdp->dirty_tx = 0;
1128 
1129 	memset(mdp->rx_ring, 0, rx_ringsize);
1130 
1131 	/* build Rx ring buffer */
1132 	for (i = 0; i < mdp->num_rx_ring; i++) {
1133 		/* skb */
1134 		mdp->rx_skbuff[i] = NULL;
1135 		skb = netdev_alloc_skb(ndev, skbuff_size);
1136 		mdp->rx_skbuff[i] = skb;
1137 		if (skb == NULL)
1138 			break;
1139 		sh_eth_set_receive_align(skb);
1140 
1141 		/* RX descriptor */
1142 		rxdesc = &mdp->rx_ring[i];
1143 		/* The size of the buffer is a multiple of 16 bytes. */
1144 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1145 		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
1146 			       DMA_FROM_DEVICE);
1147 		rxdesc->addr = virt_to_phys(skb->data);
1148 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1149 
1150 		/* Rx descriptor address set */
1151 		if (i == 0) {
1152 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1153 			if (sh_eth_is_gether(mdp) ||
1154 			    sh_eth_is_rz_fast_ether(mdp))
1155 				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1156 		}
1157 	}
1158 
1159 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1160 
1161 	/* Mark the last entry as wrapping the ring. */
1162 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1163 
1164 	memset(mdp->tx_ring, 0, tx_ringsize);
1165 
1166 	/* build Tx ring buffer */
1167 	for (i = 0; i < mdp->num_tx_ring; i++) {
1168 		mdp->tx_skbuff[i] = NULL;
1169 		txdesc = &mdp->tx_ring[i];
1170 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1171 		txdesc->buffer_length = 0;
1172 		if (i == 0) {
1173 			/* Tx descriptor address set */
1174 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1175 			if (sh_eth_is_gether(mdp) ||
1176 			    sh_eth_is_rz_fast_ether(mdp))
1177 				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1178 		}
1179 	}
1180 
1181 	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1182 }
1183 
/* Allocate the skb tracking arrays and the Rx/Tx descriptor rings */
1185 static int sh_eth_ring_init(struct net_device *ndev)
1186 {
1187 	struct sh_eth_private *mdp = netdev_priv(ndev);
1188 	int rx_ringsize, tx_ringsize, ret = 0;
1189 
1190 	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1191 	 * card needs room to do 8 byte alignment, +2 so we can reserve
1192 	 * the first 2 bytes, and +16 gets room for the status word from the
1193 	 * card.
1194 	 */
1195 	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1196 			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1197 	if (mdp->cd->rpadir)
1198 		mdp->rx_buf_sz += NET_IP_ALIGN;
1199 
1200 	/* Allocate RX and TX skb rings */
1201 	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1202 				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1203 	if (!mdp->rx_skbuff) {
1204 		ret = -ENOMEM;
1205 		return ret;
1206 	}
1207 
1208 	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1209 				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1210 	if (!mdp->tx_skbuff) {
1211 		ret = -ENOMEM;
1212 		goto skb_ring_free;
1213 	}
1214 
1215 	/* Allocate all Rx descriptors. */
1216 	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1217 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1218 					  GFP_KERNEL);
1219 	if (!mdp->rx_ring) {
1220 		ret = -ENOMEM;
1221 		goto desc_ring_free;
1222 	}
1223 
1224 	mdp->dirty_rx = 0;
1225 
1226 	/* Allocate all Tx descriptors. */
1227 	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1228 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1229 					  GFP_KERNEL);
1230 	if (!mdp->tx_ring) {
1231 		ret = -ENOMEM;
1232 		goto desc_ring_free;
1233 	}
1234 	return ret;
1235 
1236 desc_ring_free:
1237 	/* free DMA buffer */
1238 	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1239 
1240 skb_ring_free:
1241 	/* Free Rx and Tx skb ring buffer */
1242 	sh_eth_ring_free(ndev);
1243 	mdp->tx_ring = NULL;
1244 	mdp->rx_ring = NULL;
1245 
1246 	return ret;
1247 }
1248 
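/* Free the Rx and Tx descriptor rings allocated by sh_eth_ring_init() */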
1249 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1250 {
1251 	int ringsize;
1252 
1253 	if (mdp->rx_ring) {
1254 		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1255 		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1256 				  mdp->rx_desc_dma);
1257 		mdp->rx_ring = NULL;
1258 	}
1259 
1260 	if (mdp->tx_ring) {
1261 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1262 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1263 				  mdp->tx_desc_dma);
1264 		mdp->tx_ring = NULL;
1265 	}
1266 }
1267 
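/* Full hardware (re)initialization: soft reset, descriptor ring setup and
 * FIFO/E-MAC configuration.  When @start is true, interrupts are unmasked
 * and reception is started as well.
 */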
1268 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1269 {
1270 	int ret = 0;
1271 	struct sh_eth_private *mdp = netdev_priv(ndev);
1272 	u32 val;
1273 
1274 	/* Soft Reset */
1275 	ret = sh_eth_reset(ndev);
1276 	if (ret)
1277 		return ret;
1278 
1279 	if (mdp->cd->rmiimode)
1280 		sh_eth_write(ndev, 0x1, RMIIMODE);
1281 
1282 	/* Descriptor format */
1283 	sh_eth_ring_format(ndev);
1284 	if (mdp->cd->rpadir)
1285 		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1286 
1287 	/* all sh_eth int mask */
1288 	sh_eth_write(ndev, 0, EESIPR);
1289 
1290 #if defined(__LITTLE_ENDIAN)
1291 	if (mdp->cd->hw_swap)
1292 		sh_eth_write(ndev, EDMR_EL, EDMR);
1293 	else
1294 #endif
1295 		sh_eth_write(ndev, 0, EDMR);
1296 
1297 	/* FIFO size set */
1298 	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1299 	sh_eth_write(ndev, 0, TFTR);
1300 
1301 	/* Frame recv control (enable multiple-packets per rx irq) */
1302 	sh_eth_write(ndev, RMCR_RNC, RMCR);
1303 
1304 	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1305 
1306 	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1308 
1309 	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1310 
1311 	if (!mdp->cd->no_trimd)
1312 		sh_eth_write(ndev, 0, TRIMD);
1313 
1314 	/* Recv frame limit set register */
1315 	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1316 		     RFLR);
1317 
1318 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1319 	if (start)
1320 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1321 
1322 	/* PAUSE Prohibition */
1323 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1324 		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1325 
1326 	sh_eth_write(ndev, val, ECMR);
1327 
1328 	if (mdp->cd->set_rate)
1329 		mdp->cd->set_rate(ndev);
1330 
1331 	/* E-MAC Status Register clear */
1332 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1333 
1334 	/* E-MAC Interrupt Enable register */
1335 	if (start)
1336 		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1337 
1338 	/* Set MAC address */
1339 	update_mac_address(ndev);
1340 
1341 	/* mask reset */
1342 	if (mdp->cd->apr)
1343 		sh_eth_write(ndev, APR_AP, APR);
1344 	if (mdp->cd->mpr)
1345 		sh_eth_write(ndev, MPR_MP, MPR);
1346 	if (mdp->cd->tpauser)
1347 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1348 
1349 	if (start) {
1350 		/* Setting the Rx mode will start the Rx process. */
1351 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1352 
1353 		netif_start_queue(ndev);
1354 	}
1355 
1356 	return ret;
1357 }
1358 
1359 /* free Tx skb function */
1360 static int sh_eth_txfree(struct net_device *ndev)
1361 {
1362 	struct sh_eth_private *mdp = netdev_priv(ndev);
1363 	struct sh_eth_txdesc *txdesc;
1364 	int free_num = 0;
1365 	int entry = 0;
1366 
1367 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1368 		entry = mdp->dirty_tx % mdp->num_tx_ring;
1369 		txdesc = &mdp->tx_ring[entry];
1370 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1371 			break;
1372 		/* Free the original skb. */
1373 		if (mdp->tx_skbuff[entry]) {
1374 			dma_unmap_single(&ndev->dev, txdesc->addr,
1375 					 txdesc->buffer_length, DMA_TO_DEVICE);
1376 			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1377 			mdp->tx_skbuff[entry] = NULL;
1378 			free_num++;
1379 		}
1380 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1381 		if (entry >= mdp->num_tx_ring - 1)
1382 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1383 
1384 		ndev->stats.tx_packets++;
1385 		ndev->stats.tx_bytes += txdesc->buffer_length;
1386 	}
1387 	return free_num;
1388 }
1389 
1390 /* Packet receive function */
1391 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1392 {
1393 	struct sh_eth_private *mdp = netdev_priv(ndev);
1394 	struct sh_eth_rxdesc *rxdesc;
1395 
1396 	int entry = mdp->cur_rx % mdp->num_rx_ring;
1397 	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1398 	int limit;
1399 	struct sk_buff *skb;
1400 	u16 pkt_len = 0;
1401 	u32 desc_status;
1402 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1403 
1404 	boguscnt = min(boguscnt, *quota);
1405 	limit = boguscnt;
1406 	rxdesc = &mdp->rx_ring[entry];
1407 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1408 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
1409 		pkt_len = rxdesc->frame_length;
1410 
1411 		if (--boguscnt < 0)
1412 			break;
1413 
1414 		if (!(desc_status & RDFEND))
1415 			ndev->stats.rx_length_errors++;
1416 
		/* On most GETHER/ETHER controllers the Receive Frame State
		 * (RFS) bits of Receive Descriptor 0 occupy bits 9..0.
		 * However, on the R8A7740, R8A779x and R7S72100 the RFS bits
		 * occupy bits 25..16, so the driver needs to shift them right
		 * by 16.
		 */
1423 		if (mdp->cd->shift_rd0)
1424 			desc_status >>= 16;
1425 
1426 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1427 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1428 			ndev->stats.rx_errors++;
1429 			if (desc_status & RD_RFS1)
1430 				ndev->stats.rx_crc_errors++;
1431 			if (desc_status & RD_RFS2)
1432 				ndev->stats.rx_frame_errors++;
1433 			if (desc_status & RD_RFS3)
1434 				ndev->stats.rx_length_errors++;
1435 			if (desc_status & RD_RFS4)
1436 				ndev->stats.rx_length_errors++;
1437 			if (desc_status & RD_RFS6)
1438 				ndev->stats.rx_missed_errors++;
1439 			if (desc_status & RD_RFS10)
1440 				ndev->stats.rx_over_errors++;
1441 		} else {
1442 			if (!mdp->cd->hw_swap)
1443 				sh_eth_soft_swap(
1444 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
1445 					pkt_len + 2);
1446 			skb = mdp->rx_skbuff[entry];
1447 			mdp->rx_skbuff[entry] = NULL;
1448 			if (mdp->cd->rpadir)
1449 				skb_reserve(skb, NET_IP_ALIGN);
1450 			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1451 						ALIGN(mdp->rx_buf_sz, 16),
1452 						DMA_FROM_DEVICE);
1453 			skb_put(skb, pkt_len);
1454 			skb->protocol = eth_type_trans(skb, ndev);
1455 			netif_receive_skb(skb);
1456 			ndev->stats.rx_packets++;
1457 			ndev->stats.rx_bytes += pkt_len;
1458 		}
1459 		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1460 		rxdesc = &mdp->rx_ring[entry];
1461 	}
1462 
1463 	/* Refill the Rx ring buffers. */
1464 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1465 		entry = mdp->dirty_rx % mdp->num_rx_ring;
1466 		rxdesc = &mdp->rx_ring[entry];
1467 		/* The size of the buffer is 16 byte boundary. */
1468 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1469 
1470 		if (mdp->rx_skbuff[entry] == NULL) {
1471 			skb = netdev_alloc_skb(ndev, skbuff_size);
1472 			mdp->rx_skbuff[entry] = skb;
1473 			if (skb == NULL)
1474 				break;	/* Better luck next round. */
1475 			sh_eth_set_receive_align(skb);
1476 			dma_map_single(&ndev->dev, skb->data,
1477 				       rxdesc->buffer_length, DMA_FROM_DEVICE);
1478 
1479 			skb_checksum_none_assert(skb);
1480 			rxdesc->addr = virt_to_phys(skb->data);
1481 		}
1482 		if (entry >= mdp->num_rx_ring - 1)
1483 			rxdesc->status |=
1484 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1485 		else
1486 			rxdesc->status |=
1487 				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1488 	}
1489 
1490 	/* Restart Rx engine if stopped. */
1491 	/* If we don't need to check status, don't. -KDU */
1492 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1493 		/* fix the values for the next receiving if RDE is set */
1494 		if (intr_status & EESR_RDE) {
1495 			u32 count = (sh_eth_read(ndev, RDFAR) -
1496 				     sh_eth_read(ndev, RDLAR)) >> 4;
1497 
1498 			mdp->cur_rx = count;
1499 			mdp->dirty_rx = count;
1500 		}
1501 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1502 	}
1503 
1504 	*quota -= limit - boguscnt - 1;
1505 
1506 	return *quota <= 0;
1507 }
1508 
1509 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1510 {
1511 	/* disable tx and rx */
1512 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1513 		~(ECMR_RE | ECMR_TE), ECMR);
1514 }
1515 
1516 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1517 {
1518 	/* enable tx and rx */
1519 	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1520 		(ECMR_RE | ECMR_TE), ECMR);
1521 }
1522 
/* E-MAC/EDMAC error interrupt handling */
1524 static void sh_eth_error(struct net_device *ndev, int intr_status)
1525 {
1526 	struct sh_eth_private *mdp = netdev_priv(ndev);
1527 	u32 felic_stat;
1528 	u32 link_stat;
1529 	u32 mask;
1530 
1531 	if (intr_status & EESR_ECI) {
1532 		felic_stat = sh_eth_read(ndev, ECSR);
1533 		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1534 		if (felic_stat & ECSR_ICD)
1535 			ndev->stats.tx_carrier_errors++;
1536 		if (felic_stat & ECSR_LCHNG) {
1537 			/* Link Changed */
1538 			if (mdp->cd->no_psr || mdp->no_ether_link) {
1539 				goto ignore_link;
1540 			} else {
1541 				link_stat = (sh_eth_read(ndev, PSR));
1542 				if (mdp->ether_link_active_low)
1543 					link_stat = ~link_stat;
1544 			}
1545 			if (!(link_stat & PHY_ST_LINK)) {
1546 				sh_eth_rcv_snd_disable(ndev);
1547 			} else {
1548 				/* Link Up */
1549 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1550 						   ~DMAC_M_ECI, EESIPR);
1551 				/* clear int */
1552 				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1553 					     ECSR);
1554 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1555 						   DMAC_M_ECI, EESIPR);
1556 				/* enable tx and rx */
1557 				sh_eth_rcv_snd_enable(ndev);
1558 			}
1559 		}
1560 	}
1561 
1562 ignore_link:
1563 	if (intr_status & EESR_TWB) {
1564 		/* Unused write back interrupt */
1565 		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1566 			ndev->stats.tx_aborted_errors++;
1567 			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1568 		}
1569 	}
1570 
1571 	if (intr_status & EESR_RABT) {
1572 		/* Receive Abort int */
1573 		if (intr_status & EESR_RFRMER) {
1574 			/* Receive Frame Overflow int */
1575 			ndev->stats.rx_frame_errors++;
1576 			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1577 		}
1578 	}
1579 
1580 	if (intr_status & EESR_TDE) {
1581 		/* Transmit Descriptor Empty int */
1582 		ndev->stats.tx_fifo_errors++;
1583 		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1584 	}
1585 
1586 	if (intr_status & EESR_TFE) {
1587 		/* FIFO under flow */
1588 		ndev->stats.tx_fifo_errors++;
1589 		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1590 	}
1591 
1592 	if (intr_status & EESR_RDE) {
1593 		/* Receive Descriptor Empty int */
1594 		ndev->stats.rx_over_errors++;
1595 		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1596 	}
1597 
1598 	if (intr_status & EESR_RFE) {
1599 		/* Receive FIFO Overflow int */
1600 		ndev->stats.rx_fifo_errors++;
1601 		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1602 	}
1603 
1604 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1605 		/* Address Error */
1606 		ndev->stats.tx_fifo_errors++;
1607 		netif_err(mdp, tx_err, ndev, "Address Error\n");
1608 	}
1609 
1610 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1611 	if (mdp->cd->no_ade)
1612 		mask &= ~EESR_ADE;
1613 	if (intr_status & mask) {
1614 		/* Tx error */
1615 		u32 edtrr = sh_eth_read(ndev, EDTRR);
1616 
1617 		/* dmesg */
1618 		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1619 			   intr_status, mdp->cur_tx, mdp->dirty_tx,
1620 			   (u32)ndev->state, edtrr);
1621 		/* dirty buffer free */
1622 		sh_eth_txfree(ndev);
1623 
1624 		/* SH7712 BUG */
1625 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1626 			/* tx dma start */
1627 			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1628 		}
1629 		/* wakeup */
1630 		netif_wake_queue(ndev);
1631 	}
1632 }
1633 
1634 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1635 {
1636 	struct net_device *ndev = netdev;
1637 	struct sh_eth_private *mdp = netdev_priv(ndev);
1638 	struct sh_eth_cpu_data *cd = mdp->cd;
1639 	irqreturn_t ret = IRQ_NONE;
1640 	unsigned long intr_status, intr_enable;
1641 
1642 	spin_lock(&mdp->lock);
1643 
1644 	/* Get interrupt status */
1645 	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to stay
	 * enabled since it is the one that comes through regardless of the
	 * mask, and we need to fully handle it in sh_eth_error() in order to
	 * quench it, as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
	 */
1651 	intr_enable = sh_eth_read(ndev, EESIPR);
1652 	intr_status &= intr_enable | DMAC_M_ECI;
1653 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1654 		ret = IRQ_HANDLED;
1655 	else
1656 		goto other_irq;
1657 
1658 	if (intr_status & EESR_RX_CHECK) {
1659 		if (napi_schedule_prep(&mdp->napi)) {
1660 			/* Mask Rx interrupts */
1661 			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1662 				     EESIPR);
1663 			__napi_schedule(&mdp->napi);
1664 		} else {
1665 			netdev_warn(ndev,
1666 				    "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1667 				    intr_status, intr_enable);
1668 		}
1669 	}
1670 
1671 	/* Tx Check */
1672 	if (intr_status & cd->tx_check) {
1673 		/* Clear Tx interrupts */
1674 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1675 
1676 		sh_eth_txfree(ndev);
1677 		netif_wake_queue(ndev);
1678 	}
1679 
1680 	if (intr_status & cd->eesr_err_check) {
1681 		/* Clear error interrupts */
1682 		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1683 
1684 		sh_eth_error(ndev, intr_status);
1685 	}
1686 
1687 other_irq:
1688 	spin_unlock(&mdp->lock);
1689 
1690 	return ret;
1691 }
1692 
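/* NAPI poll handler: receive frames until the Rx interrupt sources are
 * quiescent or the budget is exhausted, then re-enable the interrupts that
 * were masked in the hard IRQ handler.
 */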
1693 static int sh_eth_poll(struct napi_struct *napi, int budget)
1694 {
1695 	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1696 						  napi);
1697 	struct net_device *ndev = napi->dev;
1698 	int quota = budget;
1699 	unsigned long intr_status;
1700 
1701 	for (;;) {
1702 		intr_status = sh_eth_read(ndev, EESR);
1703 		if (!(intr_status & EESR_RX_CHECK))
1704 			break;
1705 		/* Clear Rx interrupts */
1706 		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1707 
1708 		if (sh_eth_rx(ndev, intr_status, &quota))
1709 			goto out;
1710 	}
1711 
1712 	napi_complete(napi);
1713 
1714 	/* Reenable Rx interrupts */
1715 	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1716 out:
1717 	return budget - quota;
1718 }
1719 
1720 /* PHY state control function */
1721 static void sh_eth_adjust_link(struct net_device *ndev)
1722 {
1723 	struct sh_eth_private *mdp = netdev_priv(ndev);
1724 	struct phy_device *phydev = mdp->phydev;
1725 	int new_state = 0;
1726 
1727 	if (phydev->link) {
1728 		if (phydev->duplex != mdp->duplex) {
1729 			new_state = 1;
1730 			mdp->duplex = phydev->duplex;
1731 			if (mdp->cd->set_duplex)
1732 				mdp->cd->set_duplex(ndev);
1733 		}
1734 
1735 		if (phydev->speed != mdp->speed) {
1736 			new_state = 1;
1737 			mdp->speed = phydev->speed;
1738 			if (mdp->cd->set_rate)
1739 				mdp->cd->set_rate(ndev);
1740 		}
1741 		if (!mdp->link) {
1742 			sh_eth_write(ndev,
1743 				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1744 				     ECMR);
1745 			new_state = 1;
1746 			mdp->link = phydev->link;
1747 			if (mdp->cd->no_psr || mdp->no_ether_link)
1748 				sh_eth_rcv_snd_enable(ndev);
1749 		}
1750 	} else if (mdp->link) {
1751 		new_state = 1;
1752 		mdp->link = 0;
1753 		mdp->speed = 0;
1754 		mdp->duplex = -1;
1755 		if (mdp->cd->no_psr || mdp->no_ether_link)
1756 			sh_eth_rcv_snd_disable(ndev);
1757 	}
1758 
1759 	if (new_state && netif_msg_link(mdp))
1760 		phy_print_status(phydev);
1761 }
1762 
1763 /* PHY init function */
1764 static int sh_eth_phy_init(struct net_device *ndev)
1765 {
1766 	struct device_node *np = ndev->dev.parent->of_node;
1767 	struct sh_eth_private *mdp = netdev_priv(ndev);
1768 	struct phy_device *phydev = NULL;
1769 
1770 	mdp->link = 0;
1771 	mdp->speed = 0;
1772 	mdp->duplex = -1;
1773 
1774 	/* Try connect to PHY */
1775 	if (np) {
1776 		struct device_node *pn;
1777 
1778 		pn = of_parse_phandle(np, "phy-handle", 0);
1779 		phydev = of_phy_connect(ndev, pn,
1780 					sh_eth_adjust_link, 0,
1781 					mdp->phy_interface);
1782 
1783 		if (!phydev)
1784 			phydev = ERR_PTR(-ENOENT);
1785 	} else {
1786 		char phy_id[MII_BUS_ID_SIZE + 3];
1787 
1788 		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1789 			 mdp->mii_bus->id, mdp->phy_id);
1790 
1791 		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1792 				     mdp->phy_interface);
1793 	}
1794 
1795 	if (IS_ERR(phydev)) {
1796 		netdev_err(ndev, "failed to connect PHY\n");
1797 		return PTR_ERR(phydev);
1798 	}
1799 
1800 	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1801 		    phydev->addr, phydev->irq, phydev->drv->name);
1802 
1803 	mdp->phydev = phydev;
1804 
1805 	return 0;
1806 }
1807 
1808 /* PHY control start function */
1809 static int sh_eth_phy_start(struct net_device *ndev)
1810 {
1811 	struct sh_eth_private *mdp = netdev_priv(ndev);
1812 	int ret;
1813 
1814 	ret = sh_eth_phy_init(ndev);
1815 	if (ret)
1816 		return ret;
1817 
1818 	phy_start(mdp->phydev);
1819 
1820 	return 0;
1821 }
1822 
1823 static int sh_eth_get_settings(struct net_device *ndev,
1824 			       struct ethtool_cmd *ecmd)
1825 {
1826 	struct sh_eth_private *mdp = netdev_priv(ndev);
1827 	unsigned long flags;
1828 	int ret;
1829 
1830 	spin_lock_irqsave(&mdp->lock, flags);
1831 	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1832 	spin_unlock_irqrestore(&mdp->lock, flags);
1833 
1834 	return ret;
1835 }
1836 
1837 static int sh_eth_set_settings(struct net_device *ndev,
1838 			       struct ethtool_cmd *ecmd)
1839 {
1840 	struct sh_eth_private *mdp = netdev_priv(ndev);
1841 	unsigned long flags;
1842 	int ret;
1843 
1844 	spin_lock_irqsave(&mdp->lock, flags);
1845 
1846 	/* disable tx and rx */
1847 	sh_eth_rcv_snd_disable(ndev);
1848 
1849 	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1850 	if (ret)
1851 		goto error_exit;
1852 
1853 	if (ecmd->duplex == DUPLEX_FULL)
1854 		mdp->duplex = 1;
1855 	else
1856 		mdp->duplex = 0;
1857 
1858 	if (mdp->cd->set_duplex)
1859 		mdp->cd->set_duplex(ndev);
1860 
1861 error_exit:
1862 	mdelay(1);
1863 
1864 	/* enable tx and rx */
1865 	sh_eth_rcv_snd_enable(ndev);
1866 
1867 	spin_unlock_irqrestore(&mdp->lock, flags);
1868 
1869 	return ret;
1870 }
1871 
1872 static int sh_eth_nway_reset(struct net_device *ndev)
1873 {
1874 	struct sh_eth_private *mdp = netdev_priv(ndev);
1875 	unsigned long flags;
1876 	int ret;
1877 
1878 	spin_lock_irqsave(&mdp->lock, flags);
1879 	ret = phy_start_aneg(mdp->phydev);
1880 	spin_unlock_irqrestore(&mdp->lock, flags);
1881 
1882 	return ret;
1883 }
1884 
1885 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1886 {
1887 	struct sh_eth_private *mdp = netdev_priv(ndev);
1888 	return mdp->msg_enable;
1889 }
1890 
1891 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1892 {
1893 	struct sh_eth_private *mdp = netdev_priv(ndev);
1894 	mdp->msg_enable = value;
1895 }
1896 
1897 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1898 	"rx_current", "tx_current",
1899 	"rx_dirty", "tx_dirty",
1900 };
1901 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1902 
1903 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1904 {
1905 	switch (sset) {
1906 	case ETH_SS_STATS:
1907 		return SH_ETH_STATS_LEN;
1908 	default:
1909 		return -EOPNOTSUPP;
1910 	}
1911 }
1912 
1913 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1914 				     struct ethtool_stats *stats, u64 *data)
1915 {
1916 	struct sh_eth_private *mdp = netdev_priv(ndev);
1917 	int i = 0;
1918 
1919 	/* device-specific stats */
1920 	data[i++] = mdp->cur_rx;
1921 	data[i++] = mdp->cur_tx;
1922 	data[i++] = mdp->dirty_rx;
1923 	data[i++] = mdp->dirty_tx;
1924 }
1925 
1926 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1927 {
1928 	switch (stringset) {
1929 	case ETH_SS_STATS:
1930 		memcpy(data, *sh_eth_gstrings_stats,
1931 		       sizeof(sh_eth_gstrings_stats));
1932 		break;
1933 	}
1934 }
1935 
1936 static void sh_eth_get_ringparam(struct net_device *ndev,
1937 				 struct ethtool_ringparam *ring)
1938 {
1939 	struct sh_eth_private *mdp = netdev_priv(ndev);
1940 
1941 	ring->rx_max_pending = RX_RING_MAX;
1942 	ring->tx_max_pending = TX_RING_MAX;
1943 	ring->rx_pending = mdp->num_rx_ring;
1944 	ring->tx_pending = mdp->num_tx_ring;
1945 }
1946 
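/* Changing the ring sizes requires quiescing the hardware: stop the queue
 * and DMA, free the old rings, reallocate them with the new sizes and then
 * reinitialize the device.
 */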
1947 static int sh_eth_set_ringparam(struct net_device *ndev,
1948 				struct ethtool_ringparam *ring)
1949 {
1950 	struct sh_eth_private *mdp = netdev_priv(ndev);
1951 	int ret;
1952 
1953 	if (ring->tx_pending > TX_RING_MAX ||
1954 	    ring->rx_pending > RX_RING_MAX ||
1955 	    ring->tx_pending < TX_RING_MIN ||
1956 	    ring->rx_pending < RX_RING_MIN)
1957 		return -EINVAL;
1958 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1959 		return -EINVAL;
1960 
1961 	if (netif_running(ndev)) {
1962 		netif_tx_disable(ndev);
1963 		/* Disable interrupts by clearing the interrupt mask. */
1964 		sh_eth_write(ndev, 0x0000, EESIPR);
1965 		/* Stop the chip's Tx and Rx processes. */
1966 		sh_eth_write(ndev, 0, EDTRR);
1967 		sh_eth_write(ndev, 0, EDRRR);
1968 		synchronize_irq(ndev->irq);
1969 	}
1970 
1971 	/* Free all the skbuffs in the Rx queue. */
1972 	sh_eth_ring_free(ndev);
1973 	/* Free DMA buffer */
1974 	sh_eth_free_dma_buffer(mdp);
1975 
1976 	/* Set new parameters */
1977 	mdp->num_rx_ring = ring->rx_pending;
1978 	mdp->num_tx_ring = ring->tx_pending;
1979 
1980 	ret = sh_eth_ring_init(ndev);
1981 	if (ret < 0) {
1982 		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1983 		return ret;
1984 	}
1985 	ret = sh_eth_dev_init(ndev, false);
1986 	if (ret < 0) {
1987 		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1988 		return ret;
1989 	}
1990 
1991 	if (netif_running(ndev)) {
1992 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1993 		/* Setting the Rx mode will start the Rx process. */
1994 		sh_eth_write(ndev, EDRRR_R, EDRRR);
1995 		netif_wake_queue(ndev);
1996 	}
1997 
1998 	return 0;
1999 }
2000 
2001 static const struct ethtool_ops sh_eth_ethtool_ops = {
2002 	.get_settings	= sh_eth_get_settings,
2003 	.set_settings	= sh_eth_set_settings,
2004 	.nway_reset	= sh_eth_nway_reset,
2005 	.get_msglevel	= sh_eth_get_msglevel,
2006 	.set_msglevel	= sh_eth_set_msglevel,
2007 	.get_link	= ethtool_op_get_link,
2008 	.get_strings	= sh_eth_get_strings,
2009 	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2010 	.get_sset_count     = sh_eth_get_sset_count,
2011 	.get_ringparam	= sh_eth_get_ringparam,
2012 	.set_ringparam	= sh_eth_set_ringparam,
2013 };
2014 
2015 /* network device open function */
2016 static int sh_eth_open(struct net_device *ndev)
2017 {
2018 	int ret = 0;
2019 	struct sh_eth_private *mdp = netdev_priv(ndev);
2020 
2021 	pm_runtime_get_sync(&mdp->pdev->dev);
2022 
2023 	napi_enable(&mdp->napi);
2024 
2025 	ret = request_irq(ndev->irq, sh_eth_interrupt,
2026 			  mdp->cd->irq_flags, ndev->name, ndev);
2027 	if (ret) {
		netdev_err(ndev, "Cannot assign IRQ number\n");
2029 		goto out_napi_off;
2030 	}
2031 
2032 	/* Descriptor set */
2033 	ret = sh_eth_ring_init(ndev);
2034 	if (ret)
2035 		goto out_free_irq;
2036 
2037 	/* device init */
2038 	ret = sh_eth_dev_init(ndev, true);
2039 	if (ret)
2040 		goto out_free_irq;
2041 
	/* PHY control start */
2043 	ret = sh_eth_phy_start(ndev);
2044 	if (ret)
2045 		goto out_free_irq;
2046 
2047 	mdp->is_opened = 1;
2048 
2049 	return ret;
2050 
2051 out_free_irq:
2052 	free_irq(ndev->irq, ndev);
2053 out_napi_off:
2054 	napi_disable(&mdp->napi);
2055 	pm_runtime_put_sync(&mdp->pdev->dev);
2056 	return ret;
2057 }
2058 
2059 /* Timeout function */
2060 static void sh_eth_tx_timeout(struct net_device *ndev)
2061 {
2062 	struct sh_eth_private *mdp = netdev_priv(ndev);
2063 	struct sh_eth_rxdesc *rxdesc;
2064 	int i;
2065 
2066 	netif_stop_queue(ndev);
2067 
2068 	netif_err(mdp, timer, ndev,
2069 		  "transmit timed out, status %8.8x, resetting...\n",
2070 		  (int)sh_eth_read(ndev, EESR));
2071 
	/* Count the timeout as a TX error */
2073 	ndev->stats.tx_errors++;
2074 
2075 	/* Free all the skbuffs in the Rx queue. */
2076 	for (i = 0; i < mdp->num_rx_ring; i++) {
2077 		rxdesc = &mdp->rx_ring[i];
2078 		rxdesc->status = 0;
2079 		rxdesc->addr = 0xBADF00D0;
2080 		dev_kfree_skb(mdp->rx_skbuff[i]);
2081 		mdp->rx_skbuff[i] = NULL;
2082 	}
2083 	for (i = 0; i < mdp->num_tx_ring; i++) {
2084 		dev_kfree_skb(mdp->tx_skbuff[i]);
2085 		mdp->tx_skbuff[i] = NULL;
2086 	}
2087 
2088 	/* device init */
2089 	sh_eth_dev_init(ndev, true);
2090 }
2091 
2092 /* Packet transmit function */
2093 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2094 {
2095 	struct sh_eth_private *mdp = netdev_priv(ndev);
2096 	struct sh_eth_txdesc *txdesc;
2097 	u32 entry;
2098 	unsigned long flags;
2099 
2100 	spin_lock_irqsave(&mdp->lock, flags);
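	/* If the ring is nearly full (fewer than four free descriptors), try
	 * to reclaim completed descriptors; stop the queue if none are freed.
	 */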
2101 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2102 		if (!sh_eth_txfree(ndev)) {
2103 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2104 			netif_stop_queue(ndev);
2105 			spin_unlock_irqrestore(&mdp->lock, flags);
2106 			return NETDEV_TX_BUSY;
2107 		}
2108 	}
2109 	spin_unlock_irqrestore(&mdp->lock, flags);
2110 
2111 	entry = mdp->cur_tx % mdp->num_tx_ring;
2112 	mdp->tx_skbuff[entry] = skb;
2113 	txdesc = &mdp->tx_ring[entry];
2114 	/* soft swap. */
2115 	if (!mdp->cd->hw_swap)
2116 		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2117 				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
		mdp->tx_skbuff[entry] = NULL;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	if (skb->len < ETH_ZLEN)
		txdesc->buffer_length = ETH_ZLEN;
	else
		txdesc->buffer_length = skb->len;
2124 
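	/* Mark the last descriptor in the ring with TDLE so the controller
	 * wraps back to the first descriptor.
	 */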
2125 	if (entry >= mdp->num_tx_ring - 1)
2126 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2127 	else
2128 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2129 
2130 	mdp->cur_tx++;
2131 
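	/* Kick the transmitter if it is not already running */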
2132 	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2133 		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2134 
2135 	return NETDEV_TX_OK;
2136 }
2137 
2138 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2139 {
2140 	struct sh_eth_private *mdp = netdev_priv(ndev);
2141 
2142 	if (sh_eth_is_rz_fast_ether(mdp))
2143 		return &ndev->stats;
2144 
2145 	if (!mdp->is_opened)
2146 		return &ndev->stats;
2147 
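	/* The counters below are clear-on-write: accumulate them into the
	 * software statistics and then reset them.
	 */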
2148 	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2149 	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
2150 	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2151 	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
2152 	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2153 	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
2154 
2155 	if (sh_eth_is_gether(mdp)) {
2156 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2157 		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
2158 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2159 		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
2160 	} else {
2161 		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2162 		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
2163 	}
2164 
2165 	return &ndev->stats;
2166 }
2167 
2168 /* device close function */
2169 static int sh_eth_close(struct net_device *ndev)
2170 {
2171 	struct sh_eth_private *mdp = netdev_priv(ndev);
2172 
2173 	netif_stop_queue(ndev);
2174 
2175 	/* Disable interrupts by clearing the interrupt mask. */
2176 	sh_eth_write(ndev, 0x0000, EESIPR);
2177 
2178 	/* Stop the chip's Tx and Rx processes. */
2179 	sh_eth_write(ndev, 0, EDTRR);
2180 	sh_eth_write(ndev, 0, EDRRR);
2181 
2182 	sh_eth_get_stats(ndev);
2183 	/* PHY Disconnect */
2184 	if (mdp->phydev) {
2185 		phy_stop(mdp->phydev);
2186 		phy_disconnect(mdp->phydev);
2187 	}
2188 
2189 	free_irq(ndev->irq, ndev);
2190 
2191 	napi_disable(&mdp->napi);
2192 
2193 	/* Free all the skbuffs in the Rx queue. */
2194 	sh_eth_ring_free(ndev);
2195 
2196 	/* free DMA buffer */
2197 	sh_eth_free_dma_buffer(mdp);
2198 
2199 	pm_runtime_put_sync(&mdp->pdev->dev);
2200 
2201 	mdp->is_opened = 0;
2202 
2203 	return 0;
2204 }
2205 
2206 /* ioctl to device function */
2207 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2208 {
2209 	struct sh_eth_private *mdp = netdev_priv(ndev);
2210 	struct phy_device *phydev = mdp->phydev;
2211 
2212 	if (!netif_running(ndev))
2213 		return -EINVAL;
2214 
2215 	if (!phydev)
2216 		return -ENODEV;
2217 
2218 	return phy_mii_ioctl(phydev, rq, cmd);
2219 }
2220 
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2222 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2223 					    int entry)
2224 {
2225 	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2226 }
2227 
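/* Each TSU_POSTn register packs eight CAM entries, four bits per entry;
 * return the mask covering the 4-bit field that belongs to @entry.
 */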
2228 static u32 sh_eth_tsu_get_post_mask(int entry)
2229 {
2230 	return 0x0f << (28 - ((entry % 8) * 4));
2231 }
2232 
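/* Port 0 uses the upper half and port 1 the lower half of each 4-bit POST
 * field; return this port's enable bit for @entry.
 */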
2233 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2234 {
2235 	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2236 }
2237 
2238 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2239 					     int entry)
2240 {
2241 	struct sh_eth_private *mdp = netdev_priv(ndev);
2242 	u32 tmp;
2243 	void *reg_offset;
2244 
2245 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2246 	tmp = ioread32(reg_offset);
2247 	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2248 }
2249 
2250 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2251 					      int entry)
2252 {
2253 	struct sh_eth_private *mdp = netdev_priv(ndev);
2254 	u32 post_mask, ref_mask, tmp;
2255 	void *reg_offset;
2256 
2257 	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2258 	post_mask = sh_eth_tsu_get_post_mask(entry);
2259 	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2260 
2261 	tmp = ioread32(reg_offset);
2262 	iowrite32(tmp & ~post_mask, reg_offset);
2263 
	/* Return true if the other port still has this entry enabled */
2265 	return tmp & ref_mask;
2266 }
2267 
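/* Poll the TSU address-set busy flag; give up after SH_ETH_TSU_TIMEOUT_MS */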
2268 static int sh_eth_tsu_busy(struct net_device *ndev)
2269 {
2270 	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2271 	struct sh_eth_private *mdp = netdev_priv(ndev);
2272 
2273 	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2274 		udelay(10);
2275 		timeout--;
2276 		if (timeout <= 0) {
2277 			netdev_err(ndev, "%s: timeout\n", __func__);
2278 			return -ETIMEDOUT;
2279 		}
2280 	}
2281 
2282 	return 0;
2283 }
2284 
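/* Write one CAM entry: the first four MAC bytes go into the high word and
 * the last two into the low word, waiting for the TSU after each write.
 */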
2285 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2286 				  const u8 *addr)
2287 {
2288 	u32 val;
2289 
2290 	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2291 	iowrite32(val, reg);
2292 	if (sh_eth_tsu_busy(ndev) < 0)
2293 		return -EBUSY;
2294 
2295 	val = addr[4] << 8 | addr[5];
2296 	iowrite32(val, reg + 4);
2297 	if (sh_eth_tsu_busy(ndev) < 0)
2298 		return -EBUSY;
2299 
2300 	return 0;
2301 }
2302 
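/* Read one CAM entry back into a 6-byte MAC address buffer */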
2303 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2304 {
2305 	u32 val;
2306 
2307 	val = ioread32(reg);
2308 	addr[0] = (val >> 24) & 0xff;
2309 	addr[1] = (val >> 16) & 0xff;
2310 	addr[2] = (val >> 8) & 0xff;
2311 	addr[3] = val & 0xff;
2312 	val = ioread32(reg + 4);
2313 	addr[4] = (val >> 8) & 0xff;
2314 	addr[5] = val & 0xff;
2315 }
2316 
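/* Linearly search the CAM for an entry matching @addr; return its index or
 * -ENOENT if it is not present.
 */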
2318 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2319 {
2320 	struct sh_eth_private *mdp = netdev_priv(ndev);
2321 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2322 	int i;
2323 	u8 c_addr[ETH_ALEN];
2324 
2325 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2326 		sh_eth_tsu_read_entry(reg_offset, c_addr);
2327 		if (ether_addr_equal(addr, c_addr))
2328 			return i;
2329 	}
2330 
2331 	return -ENOENT;
2332 }
2333 
2334 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2335 {
2336 	u8 blank[ETH_ALEN];
2337 	int entry;
2338 
2339 	memset(blank, 0, sizeof(blank));
2340 	entry = sh_eth_tsu_find_entry(ndev, blank);
2341 	return (entry < 0) ? -ENOMEM : entry;
2342 }
2343 
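/* Clear the entry's enable bit in TSU_TEN and wipe its CAM address so the
 * slot can be reused.
 */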
2344 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2345 					      int entry)
2346 {
2347 	struct sh_eth_private *mdp = netdev_priv(ndev);
2348 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2349 	int ret;
2350 	u8 blank[ETH_ALEN];
2351 
2352 	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2353 			 ~(1 << (31 - entry)), TSU_TEN);
2354 
2355 	memset(blank, 0, sizeof(blank));
2356 	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2357 	if (ret < 0)
2358 		return ret;
2359 	return 0;
2360 }
2361 
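/* Add @addr to the CAM (creating an entry if necessary) and enable its
 * reception on this port via the POST registers.
 */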
2362 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2363 {
2364 	struct sh_eth_private *mdp = netdev_priv(ndev);
2365 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2366 	int i, ret;
2367 
2368 	if (!mdp->cd->tsu)
2369 		return 0;
2370 
2371 	i = sh_eth_tsu_find_entry(ndev, addr);
2372 	if (i < 0) {
2373 		/* No entry found, create one */
2374 		i = sh_eth_tsu_find_empty(ndev);
2375 		if (i < 0)
2376 			return -ENOMEM;
2377 		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2378 		if (ret < 0)
2379 			return ret;
2380 
2381 		/* Enable the entry */
2382 		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2383 				 (1 << (31 - i)), TSU_TEN);
2384 	}
2385 
2386 	/* Entry found or created, enable POST */
2387 	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2388 
2389 	return 0;
2390 }
2391 
2392 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2393 {
2394 	struct sh_eth_private *mdp = netdev_priv(ndev);
2395 	int i, ret;
2396 
2397 	if (!mdp->cd->tsu)
2398 		return 0;
2399 
2400 	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
2402 		/* Entry found */
2403 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2404 			goto done;
2405 
		/* Disable the entry if both ports were disabled */
2407 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2408 		if (ret < 0)
2409 			return ret;
2410 	}
2411 done:
2412 	return 0;
2413 }
2414 
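/* Drop this port's POST enable for every CAM entry and free any entry that
 * is no longer used by either port.
 */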
2415 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2416 {
2417 	struct sh_eth_private *mdp = netdev_priv(ndev);
2418 	int i, ret;
2419 
2420 	if (unlikely(!mdp->cd->tsu))
2421 		return 0;
2422 
2423 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2424 		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2425 			continue;
2426 
		/* Disable the entry if both ports were disabled */
2428 		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2429 		if (ret < 0)
2430 			return ret;
2431 	}
2432 
2433 	return 0;
2434 }
2435 
2436 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2437 {
2438 	struct sh_eth_private *mdp = netdev_priv(ndev);
2439 	u8 addr[ETH_ALEN];
2440 	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2441 	int i;
2442 
2443 	if (unlikely(!mdp->cd->tsu))
2444 		return;
2445 
2446 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2447 		sh_eth_tsu_read_entry(reg_offset, addr);
2448 		if (is_multicast_ether_addr(addr))
2449 			sh_eth_tsu_del_entry(ndev, addr);
2450 	}
2451 }
2452 
/* Set the multicast reception mode */
2454 static void sh_eth_set_multicast_list(struct net_device *ndev)
2455 {
2456 	struct sh_eth_private *mdp = netdev_priv(ndev);
2457 	u32 ecmr_bits;
2458 	int mcast_all = 0;
2459 	unsigned long flags;
2460 
2461 	spin_lock_irqsave(&mdp->lock, flags);
2462 	/* Initial condition is MCT = 1, PRM = 0.
2463 	 * Depending on ndev->flags, set PRM or clear MCT
2464 	 */
2465 	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2466 
2467 	if (!(ndev->flags & IFF_MULTICAST)) {
2468 		sh_eth_tsu_purge_mcast(ndev);
2469 		mcast_all = 1;
2470 	}
2471 	if (ndev->flags & IFF_ALLMULTI) {
2472 		sh_eth_tsu_purge_mcast(ndev);
2473 		ecmr_bits &= ~ECMR_MCT;
2474 		mcast_all = 1;
2475 	}
2476 
2477 	if (ndev->flags & IFF_PROMISC) {
2478 		sh_eth_tsu_purge_all(ndev);
2479 		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2480 	} else if (mdp->cd->tsu) {
2481 		struct netdev_hw_addr *ha;
2482 		netdev_for_each_mc_addr(ha, ndev) {
2483 			if (mcast_all && is_multicast_ether_addr(ha->addr))
2484 				continue;
2485 
2486 			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2487 				if (!mcast_all) {
2488 					sh_eth_tsu_purge_mcast(ndev);
2489 					ecmr_bits &= ~ECMR_MCT;
2490 					mcast_all = 1;
2491 				}
2492 			}
2493 		}
2494 	} else {
2495 		/* Normal, unicast/broadcast-only mode. */
2496 		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2497 	}
2498 
2499 	/* update the ethernet mode */
2500 	sh_eth_write(ndev, ecmr_bits, ECMR);
2501 
2502 	spin_unlock_irqrestore(&mdp->lock, flags);
2503 }
2504 
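/* Each port has its own VLAN tag filter register */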
2505 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2506 {
2507 	if (!mdp->port)
2508 		return TSU_VTAG0;
2509 	else
2510 		return TSU_VTAG1;
2511 }
2512 
2513 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2514 				  __be16 proto, u16 vid)
2515 {
2516 	struct sh_eth_private *mdp = netdev_priv(ndev);
2517 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2518 
2519 	if (unlikely(!mdp->cd->tsu))
2520 		return -EPERM;
2521 
2522 	/* No filtering if vid = 0 */
2523 	if (!vid)
2524 		return 0;
2525 
2526 	mdp->vlan_num_ids++;
2527 
	/* The controller has one VLAN tag HW filter.  So, if the filter is
	 * already enabled, the driver disables it and all VLAN IDs are then
	 * passed unfiltered.
	 */
2531 	if (mdp->vlan_num_ids > 1) {
2532 		/* disable VLAN filter */
2533 		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2534 		return 0;
2535 	}
2536 
2537 	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2538 			 vtag_reg_index);
2539 
2540 	return 0;
2541 }
2542 
2543 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2544 				   __be16 proto, u16 vid)
2545 {
2546 	struct sh_eth_private *mdp = netdev_priv(ndev);
2547 	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2548 
2549 	if (unlikely(!mdp->cd->tsu))
2550 		return -EPERM;
2551 
2552 	/* No filtering if vid = 0 */
2553 	if (!vid)
2554 		return 0;
2555 
2556 	mdp->vlan_num_ids--;
2557 	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2558 
2559 	return 0;
2560 }
2561 
2562 /* SuperH's TSU register init function */
2563 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2564 {
2565 	if (sh_eth_is_rz_fast_ether(mdp)) {
2566 		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2567 		return;
2568 	}
2569 
2570 	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2571 	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2572 	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2573 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2574 	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2575 	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2576 	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2577 	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2578 	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2579 	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2580 	if (sh_eth_is_gether(mdp)) {
2581 		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2582 		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2583 	} else {
2584 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2585 		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2586 	}
2587 	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
2588 	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
2589 	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
2590 	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2591 	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2592 	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2593 	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2594 }
2595 
2596 /* MDIO bus release function */
2597 static int sh_mdio_release(struct sh_eth_private *mdp)
2598 {
2599 	/* unregister mdio bus */
2600 	mdiobus_unregister(mdp->mii_bus);
2601 
2602 	/* free bitbang info */
2603 	free_mdio_bitbang(mdp->mii_bus);
2604 
2605 	return 0;
2606 }
2607 
2608 /* MDIO bus init function */
2609 static int sh_mdio_init(struct sh_eth_private *mdp,
2610 			struct sh_eth_plat_data *pd)
2611 {
2612 	int ret, i;
2613 	struct bb_info *bitbang;
2614 	struct platform_device *pdev = mdp->pdev;
2615 	struct device *dev = &mdp->pdev->dev;
2616 
2617 	/* create bit control struct for PHY */
2618 	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2619 	if (!bitbang)
2620 		return -ENOMEM;
2621 
2622 	/* bitbang init */
2623 	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2624 	bitbang->set_gate = pd->set_mdio_gate;
2625 	bitbang->mdi_msk = PIR_MDI;
2626 	bitbang->mdo_msk = PIR_MDO;
2627 	bitbang->mmd_msk = PIR_MMD;
2628 	bitbang->mdc_msk = PIR_MDC;
2629 	bitbang->ctrl.ops = &bb_ops;
2630 
2631 	/* MII controller setting */
2632 	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2633 	if (!mdp->mii_bus)
2634 		return -ENOMEM;
2635 
2636 	/* Hook up MII support for ethtool */
2637 	mdp->mii_bus->name = "sh_mii";
2638 	mdp->mii_bus->parent = dev;
2639 	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2640 		 pdev->name, pdev->id);
2641 
2642 	/* PHY IRQ */
2643 	mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2644 					       GFP_KERNEL);
2645 	if (!mdp->mii_bus->irq) {
2646 		ret = -ENOMEM;
2647 		goto out_free_bus;
2648 	}
2649 
2650 	/* register MDIO bus */
2651 	if (dev->of_node) {
2652 		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2653 	} else {
2654 		for (i = 0; i < PHY_MAX_ADDR; i++)
2655 			mdp->mii_bus->irq[i] = PHY_POLL;
2656 		if (pd->phy_irq > 0)
2657 			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2658 
2659 		ret = mdiobus_register(mdp->mii_bus);
2660 	}
2661 
2662 	if (ret)
2663 		goto out_free_bus;
2664 
2665 	return 0;
2666 
2667 out_free_bus:
2668 	free_mdio_bitbang(mdp->mii_bus);
2669 	return ret;
2670 }
2671 
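/* Map the per-SoC register layout selector to its register offset table */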
2672 static const u16 *sh_eth_get_register_offset(int register_type)
2673 {
2674 	const u16 *reg_offset = NULL;
2675 
2676 	switch (register_type) {
2677 	case SH_ETH_REG_GIGABIT:
2678 		reg_offset = sh_eth_offset_gigabit;
2679 		break;
2680 	case SH_ETH_REG_FAST_RZ:
2681 		reg_offset = sh_eth_offset_fast_rz;
2682 		break;
2683 	case SH_ETH_REG_FAST_RCAR:
2684 		reg_offset = sh_eth_offset_fast_rcar;
2685 		break;
2686 	case SH_ETH_REG_FAST_SH4:
2687 		reg_offset = sh_eth_offset_fast_sh4;
2688 		break;
2689 	case SH_ETH_REG_FAST_SH3_SH2:
2690 		reg_offset = sh_eth_offset_fast_sh3_sh2;
2691 		break;
2692 	default:
2693 		break;
2694 	}
2695 
2696 	return reg_offset;
2697 }
2698 
2699 static const struct net_device_ops sh_eth_netdev_ops = {
2700 	.ndo_open		= sh_eth_open,
2701 	.ndo_stop		= sh_eth_close,
2702 	.ndo_start_xmit		= sh_eth_start_xmit,
2703 	.ndo_get_stats		= sh_eth_get_stats,
2704 	.ndo_tx_timeout		= sh_eth_tx_timeout,
2705 	.ndo_do_ioctl		= sh_eth_do_ioctl,
2706 	.ndo_validate_addr	= eth_validate_addr,
2707 	.ndo_set_mac_address	= eth_mac_addr,
2708 	.ndo_change_mtu		= eth_change_mtu,
2709 };
2710 
2711 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2712 	.ndo_open		= sh_eth_open,
2713 	.ndo_stop		= sh_eth_close,
2714 	.ndo_start_xmit		= sh_eth_start_xmit,
2715 	.ndo_get_stats		= sh_eth_get_stats,
2716 	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2717 	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2718 	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2719 	.ndo_tx_timeout		= sh_eth_tx_timeout,
2720 	.ndo_do_ioctl		= sh_eth_do_ioctl,
2721 	.ndo_validate_addr	= eth_validate_addr,
2722 	.ndo_set_mac_address	= eth_mac_addr,
2723 	.ndo_change_mtu		= eth_change_mtu,
2724 };
2725 
2726 #ifdef CONFIG_OF
2727 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2728 {
2729 	struct device_node *np = dev->of_node;
2730 	struct sh_eth_plat_data *pdata;
2731 	const char *mac_addr;
2732 
2733 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2734 	if (!pdata)
2735 		return NULL;
2736 
2737 	pdata->phy_interface = of_get_phy_mode(np);
2738 
2739 	mac_addr = of_get_mac_address(np);
2740 	if (mac_addr)
2741 		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2742 
2743 	pdata->no_ether_link =
2744 		of_property_read_bool(np, "renesas,no-ether-link");
2745 	pdata->ether_link_active_low =
2746 		of_property_read_bool(np, "renesas,ether-link-active-low");
2747 
2748 	return pdata;
2749 }
2750 
2751 static const struct of_device_id sh_eth_match_table[] = {
2752 	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2753 	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2754 	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2755 	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2756 	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2757 	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
2758 	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
2759 	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2760 	{ }
2761 };
2762 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2763 #else
2764 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2765 {
2766 	return NULL;
2767 }
2768 #endif
2769 
2770 static int sh_eth_drv_probe(struct platform_device *pdev)
2771 {
2772 	int ret, devno = 0;
2773 	struct resource *res;
2774 	struct net_device *ndev = NULL;
2775 	struct sh_eth_private *mdp = NULL;
2776 	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2777 	const struct platform_device_id *id = platform_get_device_id(pdev);
2778 
2779 	/* get base addr */
2780 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2781 
2782 	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2783 	if (!ndev)
2784 		return -ENOMEM;
2785 
2786 	pm_runtime_enable(&pdev->dev);
2787 	pm_runtime_get_sync(&pdev->dev);
2788 
2789 	devno = pdev->id;
2790 	if (devno < 0)
2791 		devno = 0;
2792 
2793 	ndev->dma = -1;
2794 	ret = platform_get_irq(pdev, 0);
2795 	if (ret < 0) {
2796 		ret = -ENODEV;
2797 		goto out_release;
2798 	}
2799 	ndev->irq = ret;
2800 
2801 	SET_NETDEV_DEV(ndev, &pdev->dev);
2802 
2803 	mdp = netdev_priv(ndev);
2804 	mdp->num_tx_ring = TX_RING_SIZE;
2805 	mdp->num_rx_ring = RX_RING_SIZE;
2806 	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2807 	if (IS_ERR(mdp->addr)) {
2808 		ret = PTR_ERR(mdp->addr);
2809 		goto out_release;
2810 	}
2811 
2812 	ndev->base_addr = res->start;
2813 
2814 	spin_lock_init(&mdp->lock);
2815 	mdp->pdev = pdev;
2816 
2817 	if (pdev->dev.of_node)
2818 		pd = sh_eth_parse_dt(&pdev->dev);
2819 	if (!pd) {
2820 		dev_err(&pdev->dev, "no platform data\n");
2821 		ret = -EINVAL;
2822 		goto out_release;
2823 	}
2824 
2825 	/* get PHY ID */
2826 	mdp->phy_id = pd->phy;
2827 	mdp->phy_interface = pd->phy_interface;
2828 	/* EDMAC endian */
2829 	mdp->edmac_endian = pd->edmac_endian;
2830 	mdp->no_ether_link = pd->no_ether_link;
2831 	mdp->ether_link_active_low = pd->ether_link_active_low;
2832 
2833 	/* set cpu data */
2834 	if (id) {
2835 		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2836 	} else	{
2837 		const struct of_device_id *match;
2838 
2839 		match = of_match_device(of_match_ptr(sh_eth_match_table),
2840 					&pdev->dev);
2841 		mdp->cd = (struct sh_eth_cpu_data *)match->data;
2842 	}
2843 	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2844 	if (!mdp->reg_offset) {
2845 		dev_err(&pdev->dev, "Unknown register type (%d)\n",
2846 			mdp->cd->register_type);
2847 		ret = -EINVAL;
2848 		goto out_release;
2849 	}
2850 	sh_eth_set_default_cpu_data(mdp->cd);
2851 
2852 	/* set function */
2853 	if (mdp->cd->tsu)
2854 		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2855 	else
2856 		ndev->netdev_ops = &sh_eth_netdev_ops;
2857 	ndev->ethtool_ops = &sh_eth_ethtool_ops;
2858 	ndev->watchdog_timeo = TX_TIMEOUT;
2859 
2860 	/* debug message level */
2861 	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2862 
2863 	/* read and set MAC address */
2864 	read_mac_address(ndev, pd->mac_addr);
2865 	if (!is_valid_ether_addr(ndev->dev_addr)) {
2866 		dev_warn(&pdev->dev,
2867 			 "no valid MAC address supplied, using a random one.\n");
2868 		eth_hw_addr_random(ndev);
2869 	}
2870 
2871 	/* ioremap the TSU registers */
2872 	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2875 		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2876 		if (IS_ERR(mdp->tsu_addr)) {
2877 			ret = PTR_ERR(mdp->tsu_addr);
2878 			goto out_release;
2879 		}
2880 		mdp->port = devno % 2;
2881 		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2882 	}
2883 
	/* initialize the first device, or any device that requests it */
2885 	if (!devno || pd->needs_init) {
2886 		if (mdp->cd->chip_reset)
2887 			mdp->cd->chip_reset(ndev);
2888 
2889 		if (mdp->cd->tsu) {
			/* TSU init (init only) */
2891 			sh_eth_tsu_init(mdp);
2892 		}
2893 	}
2894 
2895 	if (mdp->cd->rmiimode)
2896 		sh_eth_write(ndev, 0x1, RMIIMODE);
2897 
2898 	/* MDIO bus init */
2899 	ret = sh_mdio_init(mdp, pd);
2900 	if (ret) {
2901 		dev_err(&ndev->dev, "failed to initialise MDIO\n");
2902 		goto out_release;
2903 	}
2904 
2905 	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2906 
2907 	/* network device register */
2908 	ret = register_netdev(ndev);
2909 	if (ret)
2910 		goto out_napi_del;
2911 
2912 	/* print device information */
2913 	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2914 		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2915 
2916 	pm_runtime_put(&pdev->dev);
2917 	platform_set_drvdata(pdev, ndev);
2918 
2919 	return ret;
2920 
2921 out_napi_del:
2922 	netif_napi_del(&mdp->napi);
2923 	sh_mdio_release(mdp);
2924 
2925 out_release:
2926 	/* net_dev free */
2927 	if (ndev)
2928 		free_netdev(ndev);
2929 
2930 	pm_runtime_put(&pdev->dev);
2931 	pm_runtime_disable(&pdev->dev);
2932 	return ret;
2933 }
2934 
2935 static int sh_eth_drv_remove(struct platform_device *pdev)
2936 {
2937 	struct net_device *ndev = platform_get_drvdata(pdev);
2938 	struct sh_eth_private *mdp = netdev_priv(ndev);
2939 
2940 	unregister_netdev(ndev);
2941 	netif_napi_del(&mdp->napi);
2942 	sh_mdio_release(mdp);
2943 	pm_runtime_disable(&pdev->dev);
2944 	free_netdev(ndev);
2945 
2946 	return 0;
2947 }
2948 
2949 #ifdef CONFIG_PM
2950 static int sh_eth_runtime_nop(struct device *dev)
2951 {
2952 	/* Runtime PM callback shared between ->runtime_suspend()
2953 	 * and ->runtime_resume(). Simply returns success.
2954 	 *
2955 	 * This driver re-initializes all registers after
2956 	 * pm_runtime_get_sync() anyway so there is no need
2957 	 * to save and restore registers here.
2958 	 */
2959 	return 0;
2960 }
2961 
2962 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2963 	.runtime_suspend = sh_eth_runtime_nop,
2964 	.runtime_resume = sh_eth_runtime_nop,
2965 };
2966 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2967 #else
2968 #define SH_ETH_PM_OPS NULL
2969 #endif
2970 
2971 static struct platform_device_id sh_eth_id_table[] = {
2972 	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2973 	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2974 	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2975 	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2976 	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2977 	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2978 	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2979 	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
2980 	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2981 	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2982 	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
2983 	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
2984 	{ "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
2985 	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
2986 	{ }
2987 };
2988 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2989 
2990 static struct platform_driver sh_eth_driver = {
2991 	.probe = sh_eth_drv_probe,
2992 	.remove = sh_eth_drv_remove,
2993 	.id_table = sh_eth_id_table,
2994 	.driver = {
2995 		   .name = CARDNAME,
2996 		   .pm = SH_ETH_PM_OPS,
2997 		   .of_match_table = of_match_ptr(sh_eth_match_table),
2998 	},
2999 };
3000 
3001 module_platform_driver(sh_eth_driver);
3002 
3003 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3004 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3005 MODULE_LICENSE("GPL v2");
3006