xref: /openbmc/linux/drivers/net/ethernet/realtek/r8169_main.c (revision a1b2f04ea527397fcacacd09e0d690927feef429)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4  *
5  * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
6  * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
7  * Copyright (c) a lot of people too. Please respect their work.
8  *
9  * See MAINTAINERS file for support contact information.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/pci.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/clk.h>
18 #include <linux/delay.h>
19 #include <linux/ethtool.h>
20 #include <linux/phy.h>
21 #include <linux/if_vlan.h>
22 #include <linux/crc32.h>
23 #include <linux/in.h>
24 #include <linux/io.h>
25 #include <linux/ip.h>
26 #include <linux/tcp.h>
27 #include <linux/interrupt.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/prefetch.h>
31 #include <linux/pci-aspm.h>
32 #include <linux/ipv6.h>
33 #include <net/ip6_checksum.h>
34 
35 #include "r8169_firmware.h"
36 
37 #define MODULENAME "r8169"
38 
39 #define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
50 #define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
51 #define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
52 #define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
53 #define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
54 #define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
55 #define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
56 #define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
57 #define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
58 
59 #define R8169_MSG_DEFAULT \
60 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
61 
62 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
63    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
64 #define	MC_FILTER_LIMIT	32
65 
66 #define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
67 #define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
68 
69 #define R8169_REGS_SIZE		256
70 #define R8169_RX_BUF_SIZE	(SZ_16K - 1)
71 #define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
72 #define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
73 #define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
74 #define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
75 
76 #define RTL_CFG_NO_GBIT	1
77 
78 /* write/read MMIO register */
79 #define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
80 #define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
81 #define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
82 #define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
83 #define RTL_R16(tp, reg)		readw(tp->mmio_addr + (reg))
84 #define RTL_R32(tp, reg)		readl(tp->mmio_addr + (reg))
85 
86 enum mac_version {
87 	/* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
88 	RTL_GIGA_MAC_VER_02,
89 	RTL_GIGA_MAC_VER_03,
90 	RTL_GIGA_MAC_VER_04,
91 	RTL_GIGA_MAC_VER_05,
92 	RTL_GIGA_MAC_VER_06,
93 	RTL_GIGA_MAC_VER_07,
94 	RTL_GIGA_MAC_VER_08,
95 	RTL_GIGA_MAC_VER_09,
96 	RTL_GIGA_MAC_VER_10,
97 	RTL_GIGA_MAC_VER_11,
98 	RTL_GIGA_MAC_VER_12,
99 	RTL_GIGA_MAC_VER_13,
100 	RTL_GIGA_MAC_VER_14,
101 	RTL_GIGA_MAC_VER_15,
102 	RTL_GIGA_MAC_VER_16,
103 	RTL_GIGA_MAC_VER_17,
104 	RTL_GIGA_MAC_VER_18,
105 	RTL_GIGA_MAC_VER_19,
106 	RTL_GIGA_MAC_VER_20,
107 	RTL_GIGA_MAC_VER_21,
108 	RTL_GIGA_MAC_VER_22,
109 	RTL_GIGA_MAC_VER_23,
110 	RTL_GIGA_MAC_VER_24,
111 	RTL_GIGA_MAC_VER_25,
112 	RTL_GIGA_MAC_VER_26,
113 	RTL_GIGA_MAC_VER_27,
114 	RTL_GIGA_MAC_VER_28,
115 	RTL_GIGA_MAC_VER_29,
116 	RTL_GIGA_MAC_VER_30,
117 	RTL_GIGA_MAC_VER_31,
118 	RTL_GIGA_MAC_VER_32,
119 	RTL_GIGA_MAC_VER_33,
120 	RTL_GIGA_MAC_VER_34,
121 	RTL_GIGA_MAC_VER_35,
122 	RTL_GIGA_MAC_VER_36,
123 	RTL_GIGA_MAC_VER_37,
124 	RTL_GIGA_MAC_VER_38,
125 	RTL_GIGA_MAC_VER_39,
126 	RTL_GIGA_MAC_VER_40,
127 	RTL_GIGA_MAC_VER_41,
128 	RTL_GIGA_MAC_VER_42,
129 	RTL_GIGA_MAC_VER_43,
130 	RTL_GIGA_MAC_VER_44,
131 	RTL_GIGA_MAC_VER_45,
132 	RTL_GIGA_MAC_VER_46,
133 	RTL_GIGA_MAC_VER_47,
134 	RTL_GIGA_MAC_VER_48,
135 	RTL_GIGA_MAC_VER_49,
136 	RTL_GIGA_MAC_VER_50,
137 	RTL_GIGA_MAC_VER_51,
138 	RTL_GIGA_MAC_NONE
139 };
140 
141 #define JUMBO_1K	ETH_DATA_LEN
142 #define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
143 #define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
144 #define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
145 #define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
146 
147 static const struct {
148 	const char *name;
149 	const char *fw_name;
150 } rtl_chip_infos[] = {
151 	/* PCI devices. */
152 	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"				},
153 	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"				},
154 	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"			},
155 	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"			},
156 	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"			},
157 	/* PCI-E devices. */
158 	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"				},
159 	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"				},
160 	[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e"			},
161 	[RTL_GIGA_MAC_VER_10] = {"RTL8101e"				},
162 	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"			},
163 	[RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b"			},
164 	[RTL_GIGA_MAC_VER_13] = {"RTL8101e"				},
165 	[RTL_GIGA_MAC_VER_14] = {"RTL8100e"				},
166 	[RTL_GIGA_MAC_VER_15] = {"RTL8100e"				},
167 	[RTL_GIGA_MAC_VER_16] = {"RTL8101e"				},
168 	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"			},
169 	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"			},
170 	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"			},
171 	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"			},
172 	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"			},
173 	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"			},
174 	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"			},
175 	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
176 	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
177 	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
178 	[RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp"			},
179 	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
180 	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
181 	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
182 	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"			},
183 	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
184 	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
185 	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
186 	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
187 	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
188 	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1 },
189 	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1 },
190 	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
191 	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
192 	[RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g"			},
193 	[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu",	FIRMWARE_8168G_3},
194 	[RTL_GIGA_MAC_VER_43] = {"RTL8106eus",		FIRMWARE_8106E_2},
195 	[RTL_GIGA_MAC_VER_44] = {"RTL8411b",		FIRMWARE_8411_2 },
196 	[RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h",	FIRMWARE_8168H_1},
197 	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
198 	[RTL_GIGA_MAC_VER_47] = {"RTL8107e",		FIRMWARE_8107E_1},
199 	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
200 	[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep"			},
201 	[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep"			},
202 	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"			},
203 };
204 
205 static const struct pci_device_id rtl8169_pci_tbl[] = {
206 	{ PCI_VDEVICE(REALTEK,	0x2502) },
207 	{ PCI_VDEVICE(REALTEK,	0x2600) },
208 	{ PCI_VDEVICE(REALTEK,	0x8129) },
209 	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
210 	{ PCI_VDEVICE(REALTEK,	0x8161) },
211 	{ PCI_VDEVICE(REALTEK,	0x8167) },
212 	{ PCI_VDEVICE(REALTEK,	0x8168) },
213 	{ PCI_VDEVICE(NCUBE,	0x8168) },
214 	{ PCI_VDEVICE(REALTEK,	0x8169) },
215 	{ PCI_VENDOR_ID_DLINK,	0x4300,
216 		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
217 	{ PCI_VDEVICE(DLINK,	0x4300) },
218 	{ PCI_VDEVICE(DLINK,	0x4302) },
219 	{ PCI_VDEVICE(AT,	0xc107) },
220 	{ PCI_VDEVICE(USR,	0x0116) },
221 	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
222 	{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
223 	{}
224 };
225 
226 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
227 
228 static struct {
229 	u32 msg_enable;
230 } debug = { -1 };
231 
232 enum rtl_registers {
233 	MAC0		= 0,	/* Ethernet hardware address. */
234 	MAC4		= 4,
235 	MAR0		= 8,	/* Multicast filter. */
236 	CounterAddrLow		= 0x10,
237 	CounterAddrHigh		= 0x14,
238 	TxDescStartAddrLow	= 0x20,
239 	TxDescStartAddrHigh	= 0x24,
240 	TxHDescStartAddrLow	= 0x28,
241 	TxHDescStartAddrHigh	= 0x2c,
242 	FLASH		= 0x30,
243 	ERSR		= 0x36,
244 	ChipCmd		= 0x37,
245 	TxPoll		= 0x38,
246 	IntrMask	= 0x3c,
247 	IntrStatus	= 0x3e,
248 
249 	TxConfig	= 0x40,
250 #define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
251 #define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
252 
253 	RxConfig	= 0x44,
254 #define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
255 #define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
256 #define	RXCFG_FIFO_SHIFT		13
257 					/* No threshold before first PCI xfer */
258 #define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
259 #define	RX_EARLY_OFF			(1 << 11)
260 #define	RXCFG_DMA_SHIFT			8
261 					/* Unlimited maximum PCI burst. */
262 #define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
263 
264 	RxMissed	= 0x4c,
265 	Cfg9346		= 0x50,
266 	Config0		= 0x51,
267 	Config1		= 0x52,
268 	Config2		= 0x53,
269 #define PME_SIGNAL			(1 << 5)	/* 8168c and later */
270 
271 	Config3		= 0x54,
272 	Config4		= 0x55,
273 	Config5		= 0x56,
274 	MultiIntr	= 0x5c,
275 	PHYAR		= 0x60,
276 	PHYstatus	= 0x6c,
277 	RxMaxSize	= 0xda,
278 	CPlusCmd	= 0xe0,
279 	IntrMitigate	= 0xe2,
280 
281 #define RTL_COALESCE_MASK	0x0f
282 #define RTL_COALESCE_SHIFT	4
283 #define RTL_COALESCE_T_MAX	(RTL_COALESCE_MASK)
284 #define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_MASK << 2)
285 
286 	RxDescAddrLow	= 0xe4,
287 	RxDescAddrHigh	= 0xe8,
288 	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
289 
290 #define NoEarlyTx	0x3f	/* Max value : no early transmit. */
291 
292 	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
293 
294 #define TxPacketMax	(8064 >> 7)
295 #define EarlySize	0x27
296 
297 	FuncEvent	= 0xf0,
298 	FuncEventMask	= 0xf4,
299 	FuncPresetState	= 0xf8,
300 	IBCR0           = 0xf8,
301 	IBCR2           = 0xf9,
302 	IBIMR0          = 0xfa,
303 	IBISR0          = 0xfb,
304 	FuncForceEvent	= 0xfc,
305 };
306 
307 enum rtl8168_8101_registers {
308 	CSIDR			= 0x64,
309 	CSIAR			= 0x68,
310 #define	CSIAR_FLAG			0x80000000
311 #define	CSIAR_WRITE_CMD			0x80000000
312 #define	CSIAR_BYTE_ENABLE		0x0000f000
313 #define	CSIAR_ADDR_MASK			0x00000fff
314 	PMCH			= 0x6f,
315 	EPHYAR			= 0x80,
316 #define	EPHYAR_FLAG			0x80000000
317 #define	EPHYAR_WRITE_CMD		0x80000000
318 #define	EPHYAR_REG_MASK			0x1f
319 #define	EPHYAR_REG_SHIFT		16
320 #define	EPHYAR_DATA_MASK		0xffff
321 	DLLPR			= 0xd0,
322 #define	PFM_EN				(1 << 6)
323 #define	TX_10M_PS_EN			(1 << 7)
324 	DBG_REG			= 0xd1,
325 #define	FIX_NAK_1			(1 << 4)
326 #define	FIX_NAK_2			(1 << 3)
327 	TWSI			= 0xd2,
328 	MCU			= 0xd3,
329 #define	NOW_IS_OOB			(1 << 7)
330 #define	TX_EMPTY			(1 << 5)
331 #define	RX_EMPTY			(1 << 4)
332 #define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
333 #define	EN_NDP				(1 << 3)
334 #define	EN_OOB_RESET			(1 << 2)
335 #define	LINK_LIST_RDY			(1 << 1)
336 	EFUSEAR			= 0xdc,
337 #define	EFUSEAR_FLAG			0x80000000
338 #define	EFUSEAR_WRITE_CMD		0x80000000
339 #define	EFUSEAR_READ_CMD		0x00000000
340 #define	EFUSEAR_REG_MASK		0x03ff
341 #define	EFUSEAR_REG_SHIFT		8
342 #define	EFUSEAR_DATA_MASK		0xff
343 	MISC_1			= 0xf2,
344 #define	PFM_D3COLD_EN			(1 << 6)
345 };
346 
347 enum rtl8168_registers {
348 	LED_FREQ		= 0x1a,
349 	EEE_LED			= 0x1b,
350 	ERIDR			= 0x70,
351 	ERIAR			= 0x74,
352 #define ERIAR_FLAG			0x80000000
353 #define ERIAR_WRITE_CMD			0x80000000
354 #define ERIAR_READ_CMD			0x00000000
355 #define ERIAR_ADDR_BYTE_ALIGN		4
356 #define ERIAR_TYPE_SHIFT		16
357 #define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
358 #define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
359 #define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
360 #define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
361 #define ERIAR_MASK_SHIFT		12
362 #define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
363 #define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
364 #define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
365 #define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
366 #define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
367 	EPHY_RXER_NUM		= 0x7c,
368 	OCPDR			= 0xb0,	/* OCP GPHY access */
369 #define OCPDR_WRITE_CMD			0x80000000
370 #define OCPDR_READ_CMD			0x00000000
371 #define OCPDR_REG_MASK			0x7f
372 #define OCPDR_GPHY_REG_SHIFT		16
373 #define OCPDR_DATA_MASK			0xffff
374 	OCPAR			= 0xb4,
375 #define OCPAR_FLAG			0x80000000
376 #define OCPAR_GPHY_WRITE_CMD		0x8000f060
377 #define OCPAR_GPHY_READ_CMD		0x0000f060
378 	GPHY_OCP		= 0xb8,
379 	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
380 	MISC			= 0xf0,	/* 8168e only. */
381 #define TXPLA_RST			(1 << 29)
382 #define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
383 #define PWM_EN				(1 << 22)
384 #define RXDV_GATED_EN			(1 << 19)
385 #define EARLY_TALLY_EN			(1 << 16)
386 };
387 
388 enum rtl_register_content {
389 	/* InterruptStatusBits */
390 	SYSErr		= 0x8000,
391 	PCSTimeout	= 0x4000,
392 	SWInt		= 0x0100,
393 	TxDescUnavail	= 0x0080,
394 	RxFIFOOver	= 0x0040,
395 	LinkChg		= 0x0020,
396 	RxOverflow	= 0x0010,
397 	TxErr		= 0x0008,
398 	TxOK		= 0x0004,
399 	RxErr		= 0x0002,
400 	RxOK		= 0x0001,
401 
402 	/* RxStatusDesc */
403 	RxRWT	= (1 << 22),
404 	RxRES	= (1 << 21),
405 	RxRUNT	= (1 << 20),
406 	RxCRC	= (1 << 19),
407 
408 	/* ChipCmdBits */
409 	StopReq		= 0x80,
410 	CmdReset	= 0x10,
411 	CmdRxEnb	= 0x08,
412 	CmdTxEnb	= 0x04,
413 	RxBufEmpty	= 0x01,
414 
415 	/* TXPoll register p.5 */
416 	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
417 	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
418 	FSWInt		= 0x01,		/* Forced software interrupt */
419 
420 	/* Cfg9346Bits */
421 	Cfg9346_Lock	= 0x00,
422 	Cfg9346_Unlock	= 0xc0,
423 
424 	/* rx_mode_bits */
425 	AcceptErr	= 0x20,
426 	AcceptRunt	= 0x10,
427 	AcceptBroadcast	= 0x08,
428 	AcceptMulticast	= 0x04,
429 	AcceptMyPhys	= 0x02,
430 	AcceptAllPhys	= 0x01,
431 #define RX_CONFIG_ACCEPT_MASK		0x3f
432 
433 	/* TxConfigBits */
434 	TxInterFrameGapShift = 24,
435 	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */
436 
437 	/* Config1 register p.24 */
438 	LEDS1		= (1 << 7),
439 	LEDS0		= (1 << 6),
440 	Speed_down	= (1 << 4),
441 	MEMMAP		= (1 << 3),
442 	IOMAP		= (1 << 2),
443 	VPD		= (1 << 1),
444 	PMEnable	= (1 << 0),	/* Power Management Enable */
445 
446 	/* Config2 register p. 25 */
447 	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
448 	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
449 	PCI_Clock_66MHz = 0x01,
450 	PCI_Clock_33MHz = 0x00,
451 
452 	/* Config3 register p.25 */
453 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
454 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
455 	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
456 	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
457 	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
458 
459 	/* Config4 register */
460 	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
461 
462 	/* Config5 register p.27 */
463 	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
464 	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
465 	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
466 	Spi_en		= (1 << 3),
467 	LanWake		= (1 << 1),	/* LanWake enable/disable */
468 	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
469 	ASPM_en		= (1 << 0),	/* ASPM enable */
470 
471 	/* CPlusCmd p.31 */
472 	EnableBist	= (1 << 15),	// 8168 8101
473 	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
474 	Normal_mode	= (1 << 13),	// unused
475 	Force_half_dup	= (1 << 12),	// 8168 8101
476 	Force_rxflow_en	= (1 << 11),	// 8168 8101
477 	Force_txflow_en	= (1 << 10),	// 8168 8101
478 	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
479 	ASF		= (1 << 8),	// 8168 8101
480 	PktCntrDisable	= (1 << 7),	// 8168 8101
481 	Mac_dbgo_sel	= 0x001c,	// 8168
482 	RxVlan		= (1 << 6),
483 	RxChkSum	= (1 << 5),
484 	PCIDAC		= (1 << 4),
485 	PCIMulRW	= (1 << 3),
486 #define INTT_MASK	GENMASK(1, 0)
487 #define CPCMD_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)
488 
489 	/* rtl8169_PHYstatus */
490 	TBI_Enable	= 0x80,
491 	TxFlowCtrl	= 0x40,
492 	RxFlowCtrl	= 0x20,
493 	_1000bpsF	= 0x10,
494 	_100bps		= 0x08,
495 	_10bps		= 0x04,
496 	LinkStatus	= 0x02,
497 	FullDup		= 0x01,
498 
499 	/* ResetCounterCommand */
500 	CounterReset	= 0x1,
501 
502 	/* DumpCounterCommand */
503 	CounterDump	= 0x8,
504 
505 	/* magic enable v2 */
506 	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
507 };
508 
509 enum rtl_desc_bit {
510 	/* First doubleword. */
511 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
512 	RingEnd		= (1 << 30), /* End of descriptor ring */
513 	FirstFrag	= (1 << 29), /* First segment of a packet */
514 	LastFrag	= (1 << 28), /* Final segment of a packet */
515 };
516 
517 /* Generic case. */
518 enum rtl_tx_desc_bit {
519 	/* First doubleword. */
520 	TD_LSO		= (1 << 27),		/* Large Send Offload */
521 #define TD_MSS_MAX			0x07ffu	/* MSS value */
522 
523 	/* Second doubleword. */
524 	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
525 };
526 
527 /* 8169, 8168b and 810x except 8102e. */
528 enum rtl_tx_desc_bit_0 {
529 	/* First doubleword. */
530 #define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
531 	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
532 	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
533 	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
534 };
535 
536 /* 8102e, 8168c and beyond. */
537 enum rtl_tx_desc_bit_1 {
538 	/* First doubleword. */
539 	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
540 	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
541 #define GTTCPHO_SHIFT			18
542 #define GTTCPHO_MAX			0x7f
543 
544 	/* Second doubleword. */
545 #define TCPHO_SHIFT			18
546 #define TCPHO_MAX			0x3ff
547 #define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
548 	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
549 	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
550 	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
551 	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
552 };
553 
554 enum rtl_rx_desc_bit {
555 	/* Rx private */
556 	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
557 	PID0		= (1 << 17), /* Protocol ID bit 0/2 */
558 
559 #define RxProtoUDP	(PID1)
560 #define RxProtoTCP	(PID0)
561 #define RxProtoIP	(PID1 | PID0)
562 #define RxProtoMask	RxProtoIP
563 
564 	IPFail		= (1 << 16), /* IP checksum failed */
565 	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
566 	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
567 	RxVlanTag	= (1 << 16), /* VLAN tag available */
568 };
569 
570 #define RsvdMask	0x3fffc000
571 
572 #define RTL_GSO_MAX_SIZE_V1	32000
573 #define RTL_GSO_MAX_SEGS_V1	24
574 #define RTL_GSO_MAX_SIZE_V2	64000
575 #define RTL_GSO_MAX_SEGS_V2	64
576 
577 struct TxDesc {
578 	__le32 opts1;
579 	__le32 opts2;
580 	__le64 addr;
581 };
582 
583 struct RxDesc {
584 	__le32 opts1;
585 	__le32 opts2;
586 	__le64 addr;
587 };
588 
589 struct ring_info {
590 	struct sk_buff	*skb;
591 	u32		len;
592 };
593 
594 struct rtl8169_counters {
595 	__le64	tx_packets;
596 	__le64	rx_packets;
597 	__le64	tx_errors;
598 	__le32	rx_errors;
599 	__le16	rx_missed;
600 	__le16	align_errors;
601 	__le32	tx_one_collision;
602 	__le32	tx_multi_collision;
603 	__le64	rx_unicast;
604 	__le64	rx_broadcast;
605 	__le32	rx_multicast;
606 	__le16	tx_aborted;
607 	__le16	tx_underun;
608 };
609 
610 struct rtl8169_tc_offsets {
611 	bool	inited;
612 	__le64	tx_errors;
613 	__le32	tx_multi_collision;
614 	__le16	tx_aborted;
615 };
616 
617 enum rtl_flag {
618 	RTL_FLAG_TASK_ENABLED = 0,
619 	RTL_FLAG_TASK_RESET_PENDING,
620 	RTL_FLAG_MAX
621 };
622 
623 struct rtl8169_stats {
624 	u64			packets;
625 	u64			bytes;
626 	struct u64_stats_sync	syncp;
627 };
628 
629 struct rtl8169_private {
630 	void __iomem *mmio_addr;	/* memory map physical address */
631 	struct pci_dev *pci_dev;
632 	struct net_device *dev;
633 	struct phy_device *phydev;
634 	struct napi_struct napi;
635 	u32 msg_enable;
636 	enum mac_version mac_version;
637 	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
638 	u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
639 	u32 dirty_tx;
640 	struct rtl8169_stats rx_stats;
641 	struct rtl8169_stats tx_stats;
642 	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
643 	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
644 	dma_addr_t TxPhyAddr;
645 	dma_addr_t RxPhyAddr;
646 	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
647 	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
648 	u16 cp_cmd;
649 	u16 irq_mask;
650 	struct clk *clk;
651 
652 	struct {
653 		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
654 		struct mutex mutex;
655 		struct work_struct work;
656 	} wk;
657 
658 	unsigned irq_enabled:1;
659 	unsigned supports_gmii:1;
660 	unsigned aspm_manageable:1;
661 	dma_addr_t counters_phys_addr;
662 	struct rtl8169_counters *counters;
663 	struct rtl8169_tc_offsets tc_offset;
664 	u32 saved_wolopts;
665 
666 	const char *fw_name;
667 	struct rtl_fw *rtl_fw;
668 
669 	u32 ocp_base;
670 };
671 
672 typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
673 
674 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
675 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
676 module_param_named(debug, debug.msg_enable, int, 0);
677 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
678 MODULE_SOFTDEP("pre: realtek");
679 MODULE_LICENSE("GPL");
680 MODULE_FIRMWARE(FIRMWARE_8168D_1);
681 MODULE_FIRMWARE(FIRMWARE_8168D_2);
682 MODULE_FIRMWARE(FIRMWARE_8168E_1);
683 MODULE_FIRMWARE(FIRMWARE_8168E_2);
684 MODULE_FIRMWARE(FIRMWARE_8168E_3);
685 MODULE_FIRMWARE(FIRMWARE_8105E_1);
686 MODULE_FIRMWARE(FIRMWARE_8168F_1);
687 MODULE_FIRMWARE(FIRMWARE_8168F_2);
688 MODULE_FIRMWARE(FIRMWARE_8402_1);
689 MODULE_FIRMWARE(FIRMWARE_8411_1);
690 MODULE_FIRMWARE(FIRMWARE_8411_2);
691 MODULE_FIRMWARE(FIRMWARE_8106E_1);
692 MODULE_FIRMWARE(FIRMWARE_8106E_2);
693 MODULE_FIRMWARE(FIRMWARE_8168G_2);
694 MODULE_FIRMWARE(FIRMWARE_8168G_3);
695 MODULE_FIRMWARE(FIRMWARE_8168H_1);
696 MODULE_FIRMWARE(FIRMWARE_8168H_2);
697 MODULE_FIRMWARE(FIRMWARE_8107E_1);
698 MODULE_FIRMWARE(FIRMWARE_8107E_2);
699 
700 static inline struct device *tp_to_dev(struct rtl8169_private *tp)
701 {
702 	return &tp->pci_dev->dev;
703 }
704 
705 static void rtl_lock_work(struct rtl8169_private *tp)
706 {
707 	mutex_lock(&tp->wk.mutex);
708 }
709 
710 static void rtl_unlock_work(struct rtl8169_private *tp)
711 {
712 	mutex_unlock(&tp->wk.mutex);
713 }
714 
715 static void rtl_lock_config_regs(struct rtl8169_private *tp)
716 {
717 	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
718 }
719 
720 static void rtl_unlock_config_regs(struct rtl8169_private *tp)
721 {
722 	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
723 }
724 
725 static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
726 {
727 	pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
728 					   PCI_EXP_DEVCTL_READRQ, force);
729 }
730 
731 static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
732 {
733 	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
734 	       tp->mac_version != RTL_GIGA_MAC_VER_39;
735 }
736 
737 struct rtl_cond {
738 	bool (*check)(struct rtl8169_private *);
739 	const char *msg;
740 };
741 
742 static void rtl_udelay(unsigned int d)
743 {
744 	udelay(d);
745 }
746 
747 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
748 			  void (*delay)(unsigned int), unsigned int d, int n,
749 			  bool high)
750 {
751 	int i;
752 
753 	for (i = 0; i < n; i++) {
754 		if (c->check(tp) == high)
755 			return true;
756 		delay(d);
757 	}
758 	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
759 		  c->msg, !high, n, d);
760 	return false;
761 }
762 
763 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
764 				      const struct rtl_cond *c,
765 				      unsigned int d, int n)
766 {
767 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
768 }
769 
770 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
771 				     const struct rtl_cond *c,
772 				     unsigned int d, int n)
773 {
774 	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
775 }
776 
777 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
778 				      const struct rtl_cond *c,
779 				      unsigned int d, int n)
780 {
781 	return rtl_loop_wait(tp, c, msleep, d, n, true);
782 }
783 
784 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
785 				     const struct rtl_cond *c,
786 				     unsigned int d, int n)
787 {
788 	return rtl_loop_wait(tp, c, msleep, d, n, false);
789 }
790 
791 #define DECLARE_RTL_COND(name)				\
792 static bool name ## _check(struct rtl8169_private *);	\
793 							\
794 static const struct rtl_cond name = {			\
795 	.check	= name ## _check,			\
796 	.msg	= #name					\
797 };							\
798 							\
799 static bool name ## _check(struct rtl8169_private *tp)
800 
801 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
802 {
803 	if (reg & 0xffff0001) {
804 		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
805 		return true;
806 	}
807 	return false;
808 }
809 
810 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
811 {
812 	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
813 }
814 
815 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
816 {
817 	if (rtl_ocp_reg_failure(tp, reg))
818 		return;
819 
820 	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
821 
822 	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
823 }
824 
825 static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
826 {
827 	if (rtl_ocp_reg_failure(tp, reg))
828 		return 0;
829 
830 	RTL_W32(tp, GPHY_OCP, reg << 15);
831 
832 	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
833 		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
834 }
835 
836 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
837 {
838 	if (rtl_ocp_reg_failure(tp, reg))
839 		return;
840 
841 	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
842 }
843 
844 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
845 {
846 	if (rtl_ocp_reg_failure(tp, reg))
847 		return 0;
848 
849 	RTL_W32(tp, OCPDR, reg << 15);
850 
851 	return RTL_R32(tp, OCPDR);
852 }
853 
854 #define OCP_STD_PHY_BASE	0xa400
855 
856 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
857 {
858 	if (reg == 0x1f) {
859 		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
860 		return;
861 	}
862 
863 	if (tp->ocp_base != OCP_STD_PHY_BASE)
864 		reg -= 0x10;
865 
866 	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
867 }
868 
869 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
870 {
871 	if (tp->ocp_base != OCP_STD_PHY_BASE)
872 		reg -= 0x10;
873 
874 	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
875 }
876 
877 static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
878 {
879 	if (reg == 0x1f) {
880 		tp->ocp_base = value << 4;
881 		return;
882 	}
883 
884 	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
885 }
886 
887 static int mac_mcu_read(struct rtl8169_private *tp, int reg)
888 {
889 	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
890 }
891 
892 DECLARE_RTL_COND(rtl_phyar_cond)
893 {
894 	return RTL_R32(tp, PHYAR) & 0x80000000;
895 }
896 
897 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
898 {
899 	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
900 
901 	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
902 	/*
903 	 * According to hardware specs a 20us delay is required after write
904 	 * complete indication, but before sending next command.
905 	 */
906 	udelay(20);
907 }
908 
909 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
910 {
911 	int value;
912 
913 	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
914 
915 	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
916 		RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
917 
918 	/*
919 	 * According to hardware specs a 20us delay is required after read
920 	 * complete indication, but before sending next command.
921 	 */
922 	udelay(20);
923 
924 	return value;
925 }
926 
927 DECLARE_RTL_COND(rtl_ocpar_cond)
928 {
929 	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
930 }
931 
932 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
933 {
934 	RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
935 	RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
936 	RTL_W32(tp, EPHY_RXER_NUM, 0);
937 
938 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
939 }
940 
941 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
942 {
943 	r8168dp_1_mdio_access(tp, reg,
944 			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
945 }
946 
947 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
948 {
949 	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
950 
951 	mdelay(1);
952 	RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
953 	RTL_W32(tp, EPHY_RXER_NUM, 0);
954 
955 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
956 		RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
957 }
958 
959 #define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
960 
961 static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
962 {
963 	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
964 }
965 
966 static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
967 {
968 	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
969 }
970 
971 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
972 {
973 	r8168dp_2_mdio_start(tp);
974 
975 	r8169_mdio_write(tp, reg, value);
976 
977 	r8168dp_2_mdio_stop(tp);
978 }
979 
980 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
981 {
982 	int value;
983 
984 	r8168dp_2_mdio_start(tp);
985 
986 	value = r8169_mdio_read(tp, reg);
987 
988 	r8168dp_2_mdio_stop(tp);
989 
990 	return value;
991 }
992 
993 static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
994 {
995 	switch (tp->mac_version) {
996 	case RTL_GIGA_MAC_VER_27:
997 		r8168dp_1_mdio_write(tp, location, val);
998 		break;
999 	case RTL_GIGA_MAC_VER_28:
1000 	case RTL_GIGA_MAC_VER_31:
1001 		r8168dp_2_mdio_write(tp, location, val);
1002 		break;
1003 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1004 		r8168g_mdio_write(tp, location, val);
1005 		break;
1006 	default:
1007 		r8169_mdio_write(tp, location, val);
1008 		break;
1009 	}
1010 }
1011 
1012 static int rtl_readphy(struct rtl8169_private *tp, int location)
1013 {
1014 	switch (tp->mac_version) {
1015 	case RTL_GIGA_MAC_VER_27:
1016 		return r8168dp_1_mdio_read(tp, location);
1017 	case RTL_GIGA_MAC_VER_28:
1018 	case RTL_GIGA_MAC_VER_31:
1019 		return r8168dp_2_mdio_read(tp, location);
1020 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1021 		return r8168g_mdio_read(tp, location);
1022 	default:
1023 		return r8169_mdio_read(tp, location);
1024 	}
1025 }
1026 
1027 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1028 {
1029 	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
1030 }
1031 
1032 static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1033 {
1034 	int val;
1035 
1036 	val = rtl_readphy(tp, reg_addr);
1037 	rtl_writephy(tp, reg_addr, (val & ~m) | p);
1038 }
1039 
1040 DECLARE_RTL_COND(rtl_ephyar_cond)
1041 {
1042 	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
1043 }
1044 
1045 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1046 {
1047 	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1048 		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1049 
1050 	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1051 
1052 	udelay(10);
1053 }
1054 
1055 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1056 {
1057 	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1058 
1059 	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1060 		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
1061 }
1062 
1063 DECLARE_RTL_COND(rtl_eriar_cond)
1064 {
1065 	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
1066 }
1067 
1068 static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1069 			   u32 val, int type)
1070 {
1071 	BUG_ON((addr & 3) || (mask == 0));
1072 	RTL_W32(tp, ERIDR, val);
1073 	RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1074 
1075 	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1076 }
1077 
1078 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1079 			  u32 val)
1080 {
1081 	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
1082 }
1083 
1084 static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1085 {
1086 	RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1087 
1088 	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1089 		RTL_R32(tp, ERIDR) : ~0;
1090 }
1091 
1092 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
1093 {
1094 	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
1095 }
1096 
1097 static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1098 			 u32 m)
1099 {
1100 	u32 val;
1101 
1102 	val = rtl_eri_read(tp, addr);
1103 	rtl_eri_write(tp, addr, mask, (val & ~m) | p);
1104 }
1105 
1106 static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
1107 			     u32 p)
1108 {
1109 	rtl_w0w1_eri(tp, addr, mask, p, 0);
1110 }
1111 
1112 static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
1113 			       u32 m)
1114 {
1115 	rtl_w0w1_eri(tp, addr, mask, 0, m);
1116 }
1117 
1118 static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
1119 {
1120 	RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1121 	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
1122 		RTL_R32(tp, OCPDR) : ~0;
1123 }
1124 
1125 static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
1126 {
1127 	return _rtl_eri_read(tp, reg, ERIAR_OOB);
1128 }
1129 
1130 static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1131 			      u32 data)
1132 {
1133 	RTL_W32(tp, OCPDR, data);
1134 	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1135 	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
1136 }
1137 
1138 static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1139 			      u32 data)
1140 {
1141 	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
1142 		       data, ERIAR_OOB);
1143 }
1144 
1145 static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
1146 {
1147 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
1148 
1149 	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
1150 }
1151 
1152 #define OOB_CMD_RESET		0x00
1153 #define OOB_CMD_DRIVER_START	0x05
1154 #define OOB_CMD_DRIVER_STOP	0x06
1155 
1156 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1157 {
1158 	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1159 }
1160 
1161 DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
1162 {
1163 	u16 reg;
1164 
1165 	reg = rtl8168_get_ocp_reg(tp);
1166 
1167 	return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
1168 }
1169 
1170 DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
1171 {
1172 	return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
1173 }
1174 
1175 DECLARE_RTL_COND(rtl_ocp_tx_cond)
1176 {
1177 	return RTL_R8(tp, IBISR0) & 0x20;
1178 }
1179 
1180 static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1181 {
1182 	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
1183 	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
1184 	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
1185 	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
1186 }
1187 
1188 static void rtl8168dp_driver_start(struct rtl8169_private *tp)
1189 {
1190 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
1191 	rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
1192 }
1193 
1194 static void rtl8168ep_driver_start(struct rtl8169_private *tp)
1195 {
1196 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
1197 	r8168ep_ocp_write(tp, 0x01, 0x30,
1198 			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
1199 	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
1200 }
1201 
1202 static void rtl8168_driver_start(struct rtl8169_private *tp)
1203 {
1204 	switch (tp->mac_version) {
1205 	case RTL_GIGA_MAC_VER_27:
1206 	case RTL_GIGA_MAC_VER_28:
1207 	case RTL_GIGA_MAC_VER_31:
1208 		rtl8168dp_driver_start(tp);
1209 		break;
1210 	case RTL_GIGA_MAC_VER_49:
1211 	case RTL_GIGA_MAC_VER_50:
1212 	case RTL_GIGA_MAC_VER_51:
1213 		rtl8168ep_driver_start(tp);
1214 		break;
1215 	default:
1216 		BUG();
1217 		break;
1218 	}
1219 }
1220 
1221 static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
1222 {
1223 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1224 	rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
1225 }
1226 
1227 static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
1228 {
1229 	rtl8168ep_stop_cmac(tp);
1230 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
1231 	r8168ep_ocp_write(tp, 0x01, 0x30,
1232 			  r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
1233 	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
1234 }
1235 
1236 static void rtl8168_driver_stop(struct rtl8169_private *tp)
1237 {
1238 	switch (tp->mac_version) {
1239 	case RTL_GIGA_MAC_VER_27:
1240 	case RTL_GIGA_MAC_VER_28:
1241 	case RTL_GIGA_MAC_VER_31:
1242 		rtl8168dp_driver_stop(tp);
1243 		break;
1244 	case RTL_GIGA_MAC_VER_49:
1245 	case RTL_GIGA_MAC_VER_50:
1246 	case RTL_GIGA_MAC_VER_51:
1247 		rtl8168ep_driver_stop(tp);
1248 		break;
1249 	default:
1250 		BUG();
1251 		break;
1252 	}
1253 }
1254 
1255 static bool r8168dp_check_dash(struct rtl8169_private *tp)
1256 {
1257 	u16 reg = rtl8168_get_ocp_reg(tp);
1258 
1259 	return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
1260 }
1261 
1262 static bool r8168ep_check_dash(struct rtl8169_private *tp)
1263 {
1264 	return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
1265 }
1266 
1267 static bool r8168_check_dash(struct rtl8169_private *tp)
1268 {
1269 	switch (tp->mac_version) {
1270 	case RTL_GIGA_MAC_VER_27:
1271 	case RTL_GIGA_MAC_VER_28:
1272 	case RTL_GIGA_MAC_VER_31:
1273 		return r8168dp_check_dash(tp);
1274 	case RTL_GIGA_MAC_VER_49:
1275 	case RTL_GIGA_MAC_VER_50:
1276 	case RTL_GIGA_MAC_VER_51:
1277 		return r8168ep_check_dash(tp);
1278 	default:
1279 		return false;
1280 	}
1281 }
1282 
1283 static void rtl_reset_packet_filter(struct rtl8169_private *tp)
1284 {
1285 	rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
1286 	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
1287 }
1288 
1289 DECLARE_RTL_COND(rtl_efusear_cond)
1290 {
1291 	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
1292 }
1293 
1294 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1295 {
1296 	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1297 
1298 	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1299 		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1300 }
1301 
1302 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1303 {
1304 	RTL_W16(tp, IntrStatus, bits);
1305 }
1306 
1307 static void rtl_irq_disable(struct rtl8169_private *tp)
1308 {
1309 	RTL_W16(tp, IntrMask, 0);
1310 	tp->irq_enabled = 0;
1311 }
1312 
1313 #define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
1314 #define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
1315 #define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1316 
1317 static void rtl_irq_enable(struct rtl8169_private *tp)
1318 {
1319 	tp->irq_enabled = 1;
1320 	RTL_W16(tp, IntrMask, tp->irq_mask);
1321 }
1322 
1323 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1324 {
1325 	rtl_irq_disable(tp);
1326 	rtl_ack_events(tp, 0xffff);
1327 	/* PCI commit */
1328 	RTL_R8(tp, ChipCmd);
1329 }
1330 
1331 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1332 {
1333 	struct net_device *dev = tp->dev;
1334 	struct phy_device *phydev = tp->phydev;
1335 
1336 	if (!netif_running(dev))
1337 		return;
1338 
1339 	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1340 	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
1341 		if (phydev->speed == SPEED_1000) {
1342 			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1343 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1344 		} else if (phydev->speed == SPEED_100) {
1345 			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1346 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1347 		} else {
1348 			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1349 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1350 		}
1351 		rtl_reset_packet_filter(tp);
1352 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1353 		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
1354 		if (phydev->speed == SPEED_1000) {
1355 			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1356 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1357 		} else {
1358 			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1359 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1360 		}
1361 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1362 		if (phydev->speed == SPEED_10) {
1363 			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
1364 			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
1365 		} else {
1366 			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
1367 		}
1368 	}
1369 }
1370 
1371 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1372 
1373 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1374 {
1375 	struct rtl8169_private *tp = netdev_priv(dev);
1376 
1377 	rtl_lock_work(tp);
1378 	wol->supported = WAKE_ANY;
1379 	wol->wolopts = tp->saved_wolopts;
1380 	rtl_unlock_work(tp);
1381 }
1382 
1383 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1384 {
1385 	unsigned int i, tmp;
1386 	static const struct {
1387 		u32 opt;
1388 		u16 reg;
1389 		u8  mask;
1390 	} cfg[] = {
1391 		{ WAKE_PHY,   Config3, LinkUp },
1392 		{ WAKE_UCAST, Config5, UWF },
1393 		{ WAKE_BCAST, Config5, BWF },
1394 		{ WAKE_MCAST, Config5, MWF },
1395 		{ WAKE_ANY,   Config5, LanWake },
1396 		{ WAKE_MAGIC, Config3, MagicPacket }
1397 	};
1398 	u8 options;
1399 
1400 	rtl_unlock_config_regs(tp);
1401 
1402 	if (rtl_is_8168evl_up(tp)) {
1403 		tmp = ARRAY_SIZE(cfg) - 1;
1404 		if (wolopts & WAKE_MAGIC)
1405 			rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
1406 					 MagicPacket_v2);
1407 		else
1408 			rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
1409 					   MagicPacket_v2);
1410 	} else {
1411 		tmp = ARRAY_SIZE(cfg);
1412 	}
1413 
1414 	for (i = 0; i < tmp; i++) {
1415 		options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
1416 		if (wolopts & cfg[i].opt)
1417 			options |= cfg[i].mask;
1418 		RTL_W8(tp, cfg[i].reg, options);
1419 	}
1420 
1421 	switch (tp->mac_version) {
1422 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
1423 		options = RTL_R8(tp, Config1) & ~PMEnable;
1424 		if (wolopts)
1425 			options |= PMEnable;
1426 		RTL_W8(tp, Config1, options);
1427 		break;
1428 	case RTL_GIGA_MAC_VER_34:
1429 	case RTL_GIGA_MAC_VER_37:
1430 	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
1431 		options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
1432 		if (wolopts)
1433 			options |= PME_SIGNAL;
1434 		RTL_W8(tp, Config2, options);
1435 		break;
1436 	default:
1437 		break;
1438 	}
1439 
1440 	rtl_lock_config_regs(tp);
1441 
1442 	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
1443 }
1444 
1445 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1446 {
1447 	struct rtl8169_private *tp = netdev_priv(dev);
1448 	struct device *d = tp_to_dev(tp);
1449 
1450 	if (wol->wolopts & ~WAKE_ANY)
1451 		return -EINVAL;
1452 
1453 	pm_runtime_get_noresume(d);
1454 
1455 	rtl_lock_work(tp);
1456 
1457 	tp->saved_wolopts = wol->wolopts;
1458 
1459 	if (pm_runtime_active(d))
1460 		__rtl8169_set_wol(tp, tp->saved_wolopts);
1461 
1462 	rtl_unlock_work(tp);
1463 
1464 	pm_runtime_put_noidle(d);
1465 
1466 	return 0;
1467 }
1468 
1469 static void rtl8169_get_drvinfo(struct net_device *dev,
1470 				struct ethtool_drvinfo *info)
1471 {
1472 	struct rtl8169_private *tp = netdev_priv(dev);
1473 	struct rtl_fw *rtl_fw = tp->rtl_fw;
1474 
1475 	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1476 	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1477 	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1478 	if (rtl_fw)
1479 		strlcpy(info->fw_version, rtl_fw->version,
1480 			sizeof(info->fw_version));
1481 }
1482 
1483 static int rtl8169_get_regs_len(struct net_device *dev)
1484 {
1485 	return R8169_REGS_SIZE;
1486 }
1487 
1488 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1489 	netdev_features_t features)
1490 {
1491 	struct rtl8169_private *tp = netdev_priv(dev);
1492 
1493 	if (dev->mtu > TD_MSS_MAX)
1494 		features &= ~NETIF_F_ALL_TSO;
1495 
1496 	if (dev->mtu > JUMBO_1K &&
1497 	    tp->mac_version > RTL_GIGA_MAC_VER_06)
1498 		features &= ~NETIF_F_IP_CSUM;
1499 
1500 	return features;
1501 }
1502 
1503 static int rtl8169_set_features(struct net_device *dev,
1504 				netdev_features_t features)
1505 {
1506 	struct rtl8169_private *tp = netdev_priv(dev);
1507 	u32 rx_config;
1508 
1509 	rtl_lock_work(tp);
1510 
1511 	rx_config = RTL_R32(tp, RxConfig);
1512 	if (features & NETIF_F_RXALL)
1513 		rx_config |= (AcceptErr | AcceptRunt);
1514 	else
1515 		rx_config &= ~(AcceptErr | AcceptRunt);
1516 
1517 	RTL_W32(tp, RxConfig, rx_config);
1518 
1519 	if (features & NETIF_F_RXCSUM)
1520 		tp->cp_cmd |= RxChkSum;
1521 	else
1522 		tp->cp_cmd &= ~RxChkSum;
1523 
1524 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1525 		tp->cp_cmd |= RxVlan;
1526 	else
1527 		tp->cp_cmd &= ~RxVlan;
1528 
1529 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1530 	RTL_R16(tp, CPlusCmd);
1531 
1532 	rtl_unlock_work(tp);
1533 
1534 	return 0;
1535 }
1536 
1537 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1538 {
1539 	return (skb_vlan_tag_present(skb)) ?
1540 		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
1541 }
1542 
1543 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1544 {
1545 	u32 opts2 = le32_to_cpu(desc->opts2);
1546 
1547 	if (opts2 & RxVlanTag)
1548 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1549 }
1550 
1551 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1552 			     void *p)
1553 {
1554 	struct rtl8169_private *tp = netdev_priv(dev);
1555 	u32 __iomem *data = tp->mmio_addr;
1556 	u32 *dw = p;
1557 	int i;
1558 
1559 	rtl_lock_work(tp);
1560 	for (i = 0; i < R8169_REGS_SIZE; i += 4)
1561 		memcpy_fromio(dw++, data++, 4);
1562 	rtl_unlock_work(tp);
1563 }
1564 
1565 static u32 rtl8169_get_msglevel(struct net_device *dev)
1566 {
1567 	struct rtl8169_private *tp = netdev_priv(dev);
1568 
1569 	return tp->msg_enable;
1570 }
1571 
1572 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1573 {
1574 	struct rtl8169_private *tp = netdev_priv(dev);
1575 
1576 	tp->msg_enable = value;
1577 }
1578 
1579 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1580 	"tx_packets",
1581 	"rx_packets",
1582 	"tx_errors",
1583 	"rx_errors",
1584 	"rx_missed",
1585 	"align_errors",
1586 	"tx_single_collisions",
1587 	"tx_multi_collisions",
1588 	"unicast",
1589 	"broadcast",
1590 	"multicast",
1591 	"tx_aborted",
1592 	"tx_underrun",
1593 };
1594 
1595 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1596 {
1597 	switch (sset) {
1598 	case ETH_SS_STATS:
1599 		return ARRAY_SIZE(rtl8169_gstrings);
1600 	default:
1601 		return -EOPNOTSUPP;
1602 	}
1603 }
1604 
1605 DECLARE_RTL_COND(rtl_counters_cond)
1606 {
1607 	return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
1608 }
1609 
1610 static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
1611 {
1612 	dma_addr_t paddr = tp->counters_phys_addr;
1613 	u32 cmd;
1614 
1615 	RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
1616 	RTL_R32(tp, CounterAddrHigh);
1617 	cmd = (u64)paddr & DMA_BIT_MASK(32);
1618 	RTL_W32(tp, CounterAddrLow, cmd);
1619 	RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
1620 
1621 	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1622 }
1623 
1624 static bool rtl8169_reset_counters(struct rtl8169_private *tp)
1625 {
1626 	/*
1627 	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
1628 	 * tally counters.
1629 	 */
1630 	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
1631 		return true;
1632 
1633 	return rtl8169_do_counters(tp, CounterReset);
1634 }
1635 
1636 static bool rtl8169_update_counters(struct rtl8169_private *tp)
1637 {
1638 	u8 val = RTL_R8(tp, ChipCmd);
1639 
1640 	/*
1641 	 * Some chips are unable to dump tally counters when the receiver
1642 	 * is disabled. If 0xff chip may be in a PCI power-save state.
1643 	 */
1644 	if (!(val & CmdRxEnb) || val == 0xff)
1645 		return true;
1646 
1647 	return rtl8169_do_counters(tp, CounterDump);
1648 }
1649 
1650 static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
1651 {
1652 	struct rtl8169_counters *counters = tp->counters;
1653 	bool ret = false;
1654 
1655 	/*
1656 	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
1657 	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1658 	 * reset by a power cycle, while the counter values collected by the
1659 	 * driver are reset at every driver unload/load cycle.
1660 	 *
1661 	 * To make sure the HW values returned by @get_stats64 match the SW
1662 	 * values, we collect the initial values at first open(*) and use them
1663 	 * as offsets to normalize the values returned by @get_stats64.
1664 	 *
1665 	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1666 	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1667 	 * set at open time by rtl_hw_start.
1668 	 */
1669 
1670 	if (tp->tc_offset.inited)
1671 		return true;
1672 
1673 	/* If both, reset and update fail, propagate to caller. */
1674 	if (rtl8169_reset_counters(tp))
1675 		ret = true;
1676 
1677 	if (rtl8169_update_counters(tp))
1678 		ret = true;
1679 
1680 	tp->tc_offset.tx_errors = counters->tx_errors;
1681 	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
1682 	tp->tc_offset.tx_aborted = counters->tx_aborted;
1683 	tp->tc_offset.inited = true;
1684 
1685 	return ret;
1686 }
1687 
1688 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1689 				      struct ethtool_stats *stats, u64 *data)
1690 {
1691 	struct rtl8169_private *tp = netdev_priv(dev);
1692 	struct device *d = tp_to_dev(tp);
1693 	struct rtl8169_counters *counters = tp->counters;
1694 
1695 	ASSERT_RTNL();
1696 
1697 	pm_runtime_get_noresume(d);
1698 
1699 	if (pm_runtime_active(d))
1700 		rtl8169_update_counters(tp);
1701 
1702 	pm_runtime_put_noidle(d);
1703 
1704 	data[0] = le64_to_cpu(counters->tx_packets);
1705 	data[1] = le64_to_cpu(counters->rx_packets);
1706 	data[2] = le64_to_cpu(counters->tx_errors);
1707 	data[3] = le32_to_cpu(counters->rx_errors);
1708 	data[4] = le16_to_cpu(counters->rx_missed);
1709 	data[5] = le16_to_cpu(counters->align_errors);
1710 	data[6] = le32_to_cpu(counters->tx_one_collision);
1711 	data[7] = le32_to_cpu(counters->tx_multi_collision);
1712 	data[8] = le64_to_cpu(counters->rx_unicast);
1713 	data[9] = le64_to_cpu(counters->rx_broadcast);
1714 	data[10] = le32_to_cpu(counters->rx_multicast);
1715 	data[11] = le16_to_cpu(counters->tx_aborted);
1716 	data[12] = le16_to_cpu(counters->tx_underun);
1717 }
1718 
1719 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1720 {
1721 	switch(stringset) {
1722 	case ETH_SS_STATS:
1723 		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1724 		break;
1725 	}
1726 }
1727 
1728 /*
1729  * Interrupt coalescing
1730  *
1731  * > 1 - the availability of the IntrMitigate (0xe2) register through the
1732  * >     8169, 8168 and 810x line of chipsets
1733  *
1734  * 8169, 8168, and 8136(810x) serial chipsets support it.
1735  *
1736  * > 2 - the Tx timer unit at gigabit speed
1737  *
1738  * The unit of the timer depends on both the speed and the setting of CPlusCmd
1739  * (0xe0) bit 1 and bit 0.
1740  *
1741  * For 8169
1742  * bit[1:0] \ speed        1000M           100M            10M
1743  * 0 0                     320ns           2.56us          40.96us
1744  * 0 1                     2.56us          20.48us         327.7us
1745  * 1 0                     5.12us          40.96us         655.4us
1746  * 1 1                     10.24us         81.92us         1.31ms
1747  *
1748  * For the other
1749  * bit[1:0] \ speed        1000M           100M            10M
1750  * 0 0                     5us             2.56us          40.96us
1751  * 0 1                     40us            20.48us         327.7us
1752  * 1 0                     80us            40.96us         655.4us
1753  * 1 1                     160us           81.92us         1.31ms
1754  */
1755 
1756 /* rx/tx scale factors for one particular CPlusCmd[0:1] value */
1757 struct rtl_coalesce_scale {
1758 	/* Rx / Tx */
1759 	u32 nsecs[2];
1760 };
1761 
1762 /* rx/tx scale factors for all CPlusCmd[0:1] cases */
1763 struct rtl_coalesce_info {
1764 	u32 speed;
1765 	struct rtl_coalesce_scale scalev[4];	/* each CPlusCmd[0:1] case */
1766 };
1767 
1768 /* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
1769 #define rxtx_x1822(r, t) {		\
1770 	{{(r),		(t)}},		\
1771 	{{(r)*8,	(t)*8}},	\
1772 	{{(r)*8*2,	(t)*8*2}},	\
1773 	{{(r)*8*2*2,	(t)*8*2*2}},	\
1774 }
1775 static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
1776 	/* speed	delays:     rx00   tx00	*/
1777 	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
1778 	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
1779 	{ SPEED_1000,	rxtx_x1822(  320,   320)	},
1780 	{ 0 },
1781 };
1782 
1783 static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
1784 	/* speed	delays:     rx00   tx00	*/
1785 	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
1786 	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
1787 	{ SPEED_1000,	rxtx_x1822( 5000,  5000)	},
1788 	{ 0 },
1789 };
1790 #undef rxtx_x1822
1791 
1792 /* get rx/tx scale vector corresponding to current speed */
1793 static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
1794 {
1795 	struct rtl8169_private *tp = netdev_priv(dev);
1796 	const struct rtl_coalesce_info *ci;
1797 
1798 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1799 		ci = rtl_coalesce_info_8169;
1800 	else
1801 		ci = rtl_coalesce_info_8168_8136;
1802 
1803 	for (; ci->speed; ci++) {
1804 		if (tp->phydev->speed == ci->speed)
1805 			return ci;
1806 	}
1807 
1808 	return ERR_PTR(-ELNRNG);
1809 }
1810 
1811 static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1812 {
1813 	struct rtl8169_private *tp = netdev_priv(dev);
1814 	const struct rtl_coalesce_info *ci;
1815 	const struct rtl_coalesce_scale *scale;
1816 	struct {
1817 		u32 *max_frames;
1818 		u32 *usecs;
1819 	} coal_settings [] = {
1820 		{ &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
1821 		{ &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
1822 	}, *p = coal_settings;
1823 	int i;
1824 	u16 w;
1825 
1826 	memset(ec, 0, sizeof(*ec));
1827 
1828 	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1829 	ci = rtl_coalesce_info(dev);
1830 	if (IS_ERR(ci))
1831 		return PTR_ERR(ci);
1832 
1833 	scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
1834 
1835 	/* read IntrMitigate and adjust according to scale */
1836 	for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
1837 		*p->max_frames = (w & RTL_COALESCE_MASK) << 2;
1838 		w >>= RTL_COALESCE_SHIFT;
1839 		*p->usecs = w & RTL_COALESCE_MASK;
1840 	}
1841 
1842 	for (i = 0; i < 2; i++) {
1843 		p = coal_settings + i;
1844 		*p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
1845 
1846 		/*
1847 		 * ethtool_coalesce says it is illegal to set both usecs and
1848 		 * max_frames to 0.
1849 		 */
1850 		if (!*p->usecs && !*p->max_frames)
1851 			*p->max_frames = 1;
1852 	}
1853 
1854 	return 0;
1855 }
1856 
1857 /* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
1858 static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
1859 			struct net_device *dev, u32 nsec, u16 *cp01)
1860 {
1861 	const struct rtl_coalesce_info *ci;
1862 	u16 i;
1863 
1864 	ci = rtl_coalesce_info(dev);
1865 	if (IS_ERR(ci))
1866 		return ERR_CAST(ci);
1867 
1868 	for (i = 0; i < 4; i++) {
1869 		u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
1870 					ci->scalev[i].nsecs[1]);
1871 		if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
1872 			*cp01 = i;
1873 			return &ci->scalev[i];
1874 		}
1875 	}
1876 
1877 	return ERR_PTR(-EINVAL);
1878 }
1879 
1880 static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1881 {
1882 	struct rtl8169_private *tp = netdev_priv(dev);
1883 	const struct rtl_coalesce_scale *scale;
1884 	struct {
1885 		u32 frames;
1886 		u32 usecs;
1887 	} coal_settings [] = {
1888 		{ ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
1889 		{ ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
1890 	}, *p = coal_settings;
1891 	u16 w = 0, cp01;
1892 	int i;
1893 
1894 	scale = rtl_coalesce_choose_scale(dev,
1895 			max(p[0].usecs, p[1].usecs) * 1000, &cp01);
1896 	if (IS_ERR(scale))
1897 		return PTR_ERR(scale);
1898 
1899 	for (i = 0; i < 2; i++, p++) {
1900 		u32 units;
1901 
1902 		/*
1903 		 * accept max_frames=1 we returned in rtl_get_coalesce.
1904 		 * accept it not only when usecs=0 because of e.g. the following scenario:
1905 		 *
1906 		 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
1907 		 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
1908 		 * - then user does `ethtool -C eth0 rx-usecs 100`
1909 		 *
1910 		 * since ethtool sends to kernel whole ethtool_coalesce
1911 		 * settings, if we do not handle rx_usecs=!0, rx_frames=1
1912 		 * we'll reject it below in `frames % 4 != 0`.
1913 		 */
1914 		if (p->frames == 1) {
1915 			p->frames = 0;
1916 		}
1917 
1918 		units = p->usecs * 1000 / scale->nsecs[i];
1919 		if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
1920 			return -EINVAL;
1921 
1922 		w <<= RTL_COALESCE_SHIFT;
1923 		w |= units;
1924 		w <<= RTL_COALESCE_SHIFT;
1925 		w |= p->frames >> 2;
1926 	}
1927 
1928 	rtl_lock_work(tp);
1929 
1930 	RTL_W16(tp, IntrMitigate, swab16(w));
1931 
1932 	tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
1933 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1934 	RTL_R16(tp, CPlusCmd);
1935 
1936 	rtl_unlock_work(tp);
1937 
1938 	return 0;
1939 }
1940 
1941 static int rtl_get_eee_supp(struct rtl8169_private *tp)
1942 {
1943 	struct phy_device *phydev = tp->phydev;
1944 	int ret;
1945 
1946 	switch (tp->mac_version) {
1947 	case RTL_GIGA_MAC_VER_34:
1948 	case RTL_GIGA_MAC_VER_35:
1949 	case RTL_GIGA_MAC_VER_36:
1950 	case RTL_GIGA_MAC_VER_38:
1951 		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1952 		break;
1953 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1954 		ret = phy_read_paged(phydev, 0x0a5c, 0x12);
1955 		break;
1956 	default:
1957 		ret = -EPROTONOSUPPORT;
1958 		break;
1959 	}
1960 
1961 	return ret;
1962 }
1963 
1964 static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
1965 {
1966 	struct phy_device *phydev = tp->phydev;
1967 	int ret;
1968 
1969 	switch (tp->mac_version) {
1970 	case RTL_GIGA_MAC_VER_34:
1971 	case RTL_GIGA_MAC_VER_35:
1972 	case RTL_GIGA_MAC_VER_36:
1973 	case RTL_GIGA_MAC_VER_38:
1974 		ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
1975 		break;
1976 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1977 		ret = phy_read_paged(phydev, 0x0a5d, 0x11);
1978 		break;
1979 	default:
1980 		ret = -EPROTONOSUPPORT;
1981 		break;
1982 	}
1983 
1984 	return ret;
1985 }
1986 
1987 static int rtl_get_eee_adv(struct rtl8169_private *tp)
1988 {
1989 	struct phy_device *phydev = tp->phydev;
1990 	int ret;
1991 
1992 	switch (tp->mac_version) {
1993 	case RTL_GIGA_MAC_VER_34:
1994 	case RTL_GIGA_MAC_VER_35:
1995 	case RTL_GIGA_MAC_VER_36:
1996 	case RTL_GIGA_MAC_VER_38:
1997 		ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1998 		break;
1999 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
2000 		ret = phy_read_paged(phydev, 0x0a5d, 0x10);
2001 		break;
2002 	default:
2003 		ret = -EPROTONOSUPPORT;
2004 		break;
2005 	}
2006 
2007 	return ret;
2008 }
2009 
2010 static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
2011 {
2012 	struct phy_device *phydev = tp->phydev;
2013 	int ret = 0;
2014 
2015 	switch (tp->mac_version) {
2016 	case RTL_GIGA_MAC_VER_34:
2017 	case RTL_GIGA_MAC_VER_35:
2018 	case RTL_GIGA_MAC_VER_36:
2019 	case RTL_GIGA_MAC_VER_38:
2020 		ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
2021 		break;
2022 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
2023 		phy_write_paged(phydev, 0x0a5d, 0x10, val);
2024 		break;
2025 	default:
2026 		ret = -EPROTONOSUPPORT;
2027 		break;
2028 	}
2029 
2030 	return ret;
2031 }
2032 
2033 static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
2034 {
2035 	struct rtl8169_private *tp = netdev_priv(dev);
2036 	struct device *d = tp_to_dev(tp);
2037 	int ret;
2038 
2039 	pm_runtime_get_noresume(d);
2040 
2041 	if (!pm_runtime_active(d)) {
2042 		ret = -EOPNOTSUPP;
2043 		goto out;
2044 	}
2045 
2046 	/* Get Supported EEE */
2047 	ret = rtl_get_eee_supp(tp);
2048 	if (ret < 0)
2049 		goto out;
2050 	data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
2051 
2052 	/* Get advertisement EEE */
2053 	ret = rtl_get_eee_adv(tp);
2054 	if (ret < 0)
2055 		goto out;
2056 	data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
2057 	data->eee_enabled = !!data->advertised;
2058 
2059 	/* Get LP advertisement EEE */
2060 	ret = rtl_get_eee_lpadv(tp);
2061 	if (ret < 0)
2062 		goto out;
2063 	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
2064 	data->eee_active = !!(data->advertised & data->lp_advertised);
2065 out:
2066 	pm_runtime_put_noidle(d);
2067 	return ret < 0 ? ret : 0;
2068 }
2069 
2070 static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
2071 {
2072 	struct rtl8169_private *tp = netdev_priv(dev);
2073 	struct device *d = tp_to_dev(tp);
2074 	int old_adv, adv = 0, cap, ret;
2075 
2076 	pm_runtime_get_noresume(d);
2077 
2078 	if (!dev->phydev || !pm_runtime_active(d)) {
2079 		ret = -EOPNOTSUPP;
2080 		goto out;
2081 	}
2082 
2083 	if (dev->phydev->autoneg == AUTONEG_DISABLE ||
2084 	    dev->phydev->duplex != DUPLEX_FULL) {
2085 		ret = -EPROTONOSUPPORT;
2086 		goto out;
2087 	}
2088 
2089 	/* Get Supported EEE */
2090 	ret = rtl_get_eee_supp(tp);
2091 	if (ret < 0)
2092 		goto out;
2093 	cap = ret;
2094 
2095 	ret = rtl_get_eee_adv(tp);
2096 	if (ret < 0)
2097 		goto out;
2098 	old_adv = ret;
2099 
2100 	if (data->eee_enabled) {
2101 		adv = !data->advertised ? cap :
2102 		      ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
2103 		/* Mask prohibited EEE modes */
2104 		adv &= ~dev->phydev->eee_broken_modes;
2105 	}
2106 
2107 	if (old_adv != adv) {
2108 		ret = rtl_set_eee_adv(tp, adv);
2109 		if (ret < 0)
2110 			goto out;
2111 
2112 		/* Restart autonegotiation so the new modes get sent to the
2113 		 * link partner.
2114 		 */
2115 		ret = phy_restart_aneg(dev->phydev);
2116 	}
2117 
2118 out:
2119 	pm_runtime_put_noidle(d);
2120 	return ret < 0 ? ret : 0;
2121 }
2122 
2123 static const struct ethtool_ops rtl8169_ethtool_ops = {
2124 	.get_drvinfo		= rtl8169_get_drvinfo,
2125 	.get_regs_len		= rtl8169_get_regs_len,
2126 	.get_link		= ethtool_op_get_link,
2127 	.get_coalesce		= rtl_get_coalesce,
2128 	.set_coalesce		= rtl_set_coalesce,
2129 	.get_msglevel		= rtl8169_get_msglevel,
2130 	.set_msglevel		= rtl8169_set_msglevel,
2131 	.get_regs		= rtl8169_get_regs,
2132 	.get_wol		= rtl8169_get_wol,
2133 	.set_wol		= rtl8169_set_wol,
2134 	.get_strings		= rtl8169_get_strings,
2135 	.get_sset_count		= rtl8169_get_sset_count,
2136 	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
2137 	.get_ts_info		= ethtool_op_get_ts_info,
2138 	.nway_reset		= phy_ethtool_nway_reset,
2139 	.get_eee		= rtl8169_get_eee,
2140 	.set_eee		= rtl8169_set_eee,
2141 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2142 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2143 };
2144 
2145 static void rtl_enable_eee(struct rtl8169_private *tp)
2146 {
2147 	int supported = rtl_get_eee_supp(tp);
2148 
2149 	if (supported > 0)
2150 		rtl_set_eee_adv(tp, supported);
2151 }
2152 
2153 static void rtl8169_get_mac_version(struct rtl8169_private *tp)
2154 {
2155 	/*
2156 	 * The driver currently handles the 8168Bf and the 8168Be identically
2157 	 * but they can be identified more specifically through the test below
2158 	 * if needed:
2159 	 *
2160 	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2161 	 *
2162 	 * Same thing for the 8101Eb and the 8101Ec:
2163 	 *
2164 	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2165 	 */
2166 	static const struct rtl_mac_info {
2167 		u16 mask;
2168 		u16 val;
2169 		u16 mac_version;
2170 	} mac_info[] = {
2171 		/* 8168EP family. */
2172 		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
2173 		{ 0x7cf, 0x501,	RTL_GIGA_MAC_VER_50 },
2174 		{ 0x7cf, 0x500,	RTL_GIGA_MAC_VER_49 },
2175 
2176 		/* 8168H family. */
2177 		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
2178 		{ 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },
2179 
2180 		/* 8168G family. */
2181 		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
2182 		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
2183 		{ 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
2184 		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },
2185 
2186 		/* 8168F family. */
2187 		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
2188 		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
2189 		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
2190 
2191 		/* 8168E family. */
2192 		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
2193 		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
2194 		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },
2195 
2196 		/* 8168D family. */
2197 		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
2198 		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },
2199 
2200 		/* 8168DP family. */
2201 		{ 0x7cf, 0x288,	RTL_GIGA_MAC_VER_27 },
2202 		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
2203 		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },
2204 
2205 		/* 8168C family. */
2206 		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
2207 		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
2208 		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
2209 		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
2210 		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
2211 		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
2212 		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },
2213 
2214 		/* 8168B family. */
2215 		{ 0x7cf, 0x380,	RTL_GIGA_MAC_VER_12 },
2216 		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
2217 		{ 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },
2218 
2219 		/* 8101 family. */
2220 		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
2221 		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
2222 		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
2223 		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
2224 		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
2225 		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
2226 		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
2227 		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
2228 		{ 0x7cf, 0x340,	RTL_GIGA_MAC_VER_13 },
2229 		{ 0x7cf, 0x343,	RTL_GIGA_MAC_VER_10 },
2230 		{ 0x7cf, 0x342,	RTL_GIGA_MAC_VER_16 },
2231 		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
2232 		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
2233 		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_16 },
2234 		/* FIXME: where did these entries come from ? -- FR */
2235 		{ 0xfc8, 0x388,	RTL_GIGA_MAC_VER_15 },
2236 		{ 0xfc8, 0x308,	RTL_GIGA_MAC_VER_14 },
2237 
2238 		/* 8110 family. */
2239 		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
2240 		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
2241 		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
2242 		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
2243 		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },
2244 
2245 		/* Catch-all */
2246 		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
2247 	};
2248 	const struct rtl_mac_info *p = mac_info;
2249 	u16 reg = RTL_R32(tp, TxConfig) >> 20;
2250 
2251 	while ((reg & p->mask) != p->val)
2252 		p++;
2253 	tp->mac_version = p->mac_version;
2254 
2255 	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2256 		dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
2257 	} else if (!tp->supports_gmii) {
2258 		if (tp->mac_version == RTL_GIGA_MAC_VER_42)
2259 			tp->mac_version = RTL_GIGA_MAC_VER_43;
2260 		else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
2261 			tp->mac_version = RTL_GIGA_MAC_VER_47;
2262 		else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
2263 			tp->mac_version = RTL_GIGA_MAC_VER_48;
2264 	}
2265 }
2266 
2267 struct phy_reg {
2268 	u16 reg;
2269 	u16 val;
2270 };
2271 
2272 static void __rtl_writephy_batch(struct rtl8169_private *tp,
2273 				 const struct phy_reg *regs, int len)
2274 {
2275 	while (len-- > 0) {
2276 		rtl_writephy(tp, regs->reg, regs->val);
2277 		regs++;
2278 	}
2279 }
2280 
2281 #define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
2282 
2283 static void rtl_release_firmware(struct rtl8169_private *tp)
2284 {
2285 	if (tp->rtl_fw) {
2286 		rtl_fw_release_firmware(tp->rtl_fw);
2287 		kfree(tp->rtl_fw);
2288 		tp->rtl_fw = NULL;
2289 	}
2290 }
2291 
2292 static void rtl_apply_firmware(struct rtl8169_private *tp)
2293 {
2294 	/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
2295 	if (tp->rtl_fw)
2296 		rtl_fw_write_firmware(tp, tp->rtl_fw);
2297 }
2298 
2299 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2300 {
2301 	if (rtl_readphy(tp, reg) != val)
2302 		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2303 	else
2304 		rtl_apply_firmware(tp);
2305 }
2306 
2307 static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
2308 {
2309 	/* Adjust EEE LED frequency */
2310 	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
2311 		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
2312 
2313 	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
2314 }
2315 
2316 static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
2317 {
2318 	struct phy_device *phydev = tp->phydev;
2319 
2320 	phy_write(phydev, 0x1f, 0x0007);
2321 	phy_write(phydev, 0x1e, 0x0020);
2322 	phy_set_bits(phydev, 0x15, BIT(8));
2323 
2324 	phy_write(phydev, 0x1f, 0x0005);
2325 	phy_write(phydev, 0x05, 0x8b85);
2326 	phy_set_bits(phydev, 0x06, BIT(13));
2327 
2328 	phy_write(phydev, 0x1f, 0x0000);
2329 }
2330 
2331 static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
2332 {
2333 	phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
2334 }
2335 
2336 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2337 {
2338 	static const struct phy_reg phy_reg_init[] = {
2339 		{ 0x1f, 0x0001 },
2340 		{ 0x06, 0x006e },
2341 		{ 0x08, 0x0708 },
2342 		{ 0x15, 0x4000 },
2343 		{ 0x18, 0x65c7 },
2344 
2345 		{ 0x1f, 0x0001 },
2346 		{ 0x03, 0x00a1 },
2347 		{ 0x02, 0x0008 },
2348 		{ 0x01, 0x0120 },
2349 		{ 0x00, 0x1000 },
2350 		{ 0x04, 0x0800 },
2351 		{ 0x04, 0x0000 },
2352 
2353 		{ 0x03, 0xff41 },
2354 		{ 0x02, 0xdf60 },
2355 		{ 0x01, 0x0140 },
2356 		{ 0x00, 0x0077 },
2357 		{ 0x04, 0x7800 },
2358 		{ 0x04, 0x7000 },
2359 
2360 		{ 0x03, 0x802f },
2361 		{ 0x02, 0x4f02 },
2362 		{ 0x01, 0x0409 },
2363 		{ 0x00, 0xf0f9 },
2364 		{ 0x04, 0x9800 },
2365 		{ 0x04, 0x9000 },
2366 
2367 		{ 0x03, 0xdf01 },
2368 		{ 0x02, 0xdf20 },
2369 		{ 0x01, 0xff95 },
2370 		{ 0x00, 0xba00 },
2371 		{ 0x04, 0xa800 },
2372 		{ 0x04, 0xa000 },
2373 
2374 		{ 0x03, 0xff41 },
2375 		{ 0x02, 0xdf20 },
2376 		{ 0x01, 0x0140 },
2377 		{ 0x00, 0x00bb },
2378 		{ 0x04, 0xb800 },
2379 		{ 0x04, 0xb000 },
2380 
2381 		{ 0x03, 0xdf41 },
2382 		{ 0x02, 0xdc60 },
2383 		{ 0x01, 0x6340 },
2384 		{ 0x00, 0x007d },
2385 		{ 0x04, 0xd800 },
2386 		{ 0x04, 0xd000 },
2387 
2388 		{ 0x03, 0xdf01 },
2389 		{ 0x02, 0xdf20 },
2390 		{ 0x01, 0x100a },
2391 		{ 0x00, 0xa0ff },
2392 		{ 0x04, 0xf800 },
2393 		{ 0x04, 0xf000 },
2394 
2395 		{ 0x1f, 0x0000 },
2396 		{ 0x0b, 0x0000 },
2397 		{ 0x00, 0x9200 }
2398 	};
2399 
2400 	rtl_writephy_batch(tp, phy_reg_init);
2401 }
2402 
2403 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2404 {
2405 	static const struct phy_reg phy_reg_init[] = {
2406 		{ 0x1f, 0x0002 },
2407 		{ 0x01, 0x90d0 },
2408 		{ 0x1f, 0x0000 }
2409 	};
2410 
2411 	rtl_writephy_batch(tp, phy_reg_init);
2412 }
2413 
2414 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2415 {
2416 	struct pci_dev *pdev = tp->pci_dev;
2417 
2418 	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2419 	    (pdev->subsystem_device != 0xe000))
2420 		return;
2421 
2422 	rtl_writephy(tp, 0x1f, 0x0001);
2423 	rtl_writephy(tp, 0x10, 0xf01b);
2424 	rtl_writephy(tp, 0x1f, 0x0000);
2425 }
2426 
2427 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2428 {
2429 	static const struct phy_reg phy_reg_init[] = {
2430 		{ 0x1f, 0x0001 },
2431 		{ 0x04, 0x0000 },
2432 		{ 0x03, 0x00a1 },
2433 		{ 0x02, 0x0008 },
2434 		{ 0x01, 0x0120 },
2435 		{ 0x00, 0x1000 },
2436 		{ 0x04, 0x0800 },
2437 		{ 0x04, 0x9000 },
2438 		{ 0x03, 0x802f },
2439 		{ 0x02, 0x4f02 },
2440 		{ 0x01, 0x0409 },
2441 		{ 0x00, 0xf099 },
2442 		{ 0x04, 0x9800 },
2443 		{ 0x04, 0xa000 },
2444 		{ 0x03, 0xdf01 },
2445 		{ 0x02, 0xdf20 },
2446 		{ 0x01, 0xff95 },
2447 		{ 0x00, 0xba00 },
2448 		{ 0x04, 0xa800 },
2449 		{ 0x04, 0xf000 },
2450 		{ 0x03, 0xdf01 },
2451 		{ 0x02, 0xdf20 },
2452 		{ 0x01, 0x101a },
2453 		{ 0x00, 0xa0ff },
2454 		{ 0x04, 0xf800 },
2455 		{ 0x04, 0x0000 },
2456 		{ 0x1f, 0x0000 },
2457 
2458 		{ 0x1f, 0x0001 },
2459 		{ 0x10, 0xf41b },
2460 		{ 0x14, 0xfb54 },
2461 		{ 0x18, 0xf5c7 },
2462 		{ 0x1f, 0x0000 },
2463 
2464 		{ 0x1f, 0x0001 },
2465 		{ 0x17, 0x0cc0 },
2466 		{ 0x1f, 0x0000 }
2467 	};
2468 
2469 	rtl_writephy_batch(tp, phy_reg_init);
2470 
2471 	rtl8169scd_hw_phy_config_quirk(tp);
2472 }
2473 
2474 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2475 {
2476 	static const struct phy_reg phy_reg_init[] = {
2477 		{ 0x1f, 0x0001 },
2478 		{ 0x04, 0x0000 },
2479 		{ 0x03, 0x00a1 },
2480 		{ 0x02, 0x0008 },
2481 		{ 0x01, 0x0120 },
2482 		{ 0x00, 0x1000 },
2483 		{ 0x04, 0x0800 },
2484 		{ 0x04, 0x9000 },
2485 		{ 0x03, 0x802f },
2486 		{ 0x02, 0x4f02 },
2487 		{ 0x01, 0x0409 },
2488 		{ 0x00, 0xf099 },
2489 		{ 0x04, 0x9800 },
2490 		{ 0x04, 0xa000 },
2491 		{ 0x03, 0xdf01 },
2492 		{ 0x02, 0xdf20 },
2493 		{ 0x01, 0xff95 },
2494 		{ 0x00, 0xba00 },
2495 		{ 0x04, 0xa800 },
2496 		{ 0x04, 0xf000 },
2497 		{ 0x03, 0xdf01 },
2498 		{ 0x02, 0xdf20 },
2499 		{ 0x01, 0x101a },
2500 		{ 0x00, 0xa0ff },
2501 		{ 0x04, 0xf800 },
2502 		{ 0x04, 0x0000 },
2503 		{ 0x1f, 0x0000 },
2504 
2505 		{ 0x1f, 0x0001 },
2506 		{ 0x0b, 0x8480 },
2507 		{ 0x1f, 0x0000 },
2508 
2509 		{ 0x1f, 0x0001 },
2510 		{ 0x18, 0x67c7 },
2511 		{ 0x04, 0x2000 },
2512 		{ 0x03, 0x002f },
2513 		{ 0x02, 0x4360 },
2514 		{ 0x01, 0x0109 },
2515 		{ 0x00, 0x3022 },
2516 		{ 0x04, 0x2800 },
2517 		{ 0x1f, 0x0000 },
2518 
2519 		{ 0x1f, 0x0001 },
2520 		{ 0x17, 0x0cc0 },
2521 		{ 0x1f, 0x0000 }
2522 	};
2523 
2524 	rtl_writephy_batch(tp, phy_reg_init);
2525 }
2526 
2527 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2528 {
2529 	static const struct phy_reg phy_reg_init[] = {
2530 		{ 0x10, 0xf41b },
2531 		{ 0x1f, 0x0000 }
2532 	};
2533 
2534 	rtl_writephy(tp, 0x1f, 0x0001);
2535 	rtl_patchphy(tp, 0x16, 1 << 0);
2536 
2537 	rtl_writephy_batch(tp, phy_reg_init);
2538 }
2539 
2540 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2541 {
2542 	static const struct phy_reg phy_reg_init[] = {
2543 		{ 0x1f, 0x0001 },
2544 		{ 0x10, 0xf41b },
2545 		{ 0x1f, 0x0000 }
2546 	};
2547 
2548 	rtl_writephy_batch(tp, phy_reg_init);
2549 }
2550 
2551 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2552 {
2553 	static const struct phy_reg phy_reg_init[] = {
2554 		{ 0x1f, 0x0000 },
2555 		{ 0x1d, 0x0f00 },
2556 		{ 0x1f, 0x0002 },
2557 		{ 0x0c, 0x1ec8 },
2558 		{ 0x1f, 0x0000 }
2559 	};
2560 
2561 	rtl_writephy_batch(tp, phy_reg_init);
2562 }
2563 
2564 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2565 {
2566 	static const struct phy_reg phy_reg_init[] = {
2567 		{ 0x1f, 0x0001 },
2568 		{ 0x1d, 0x3d98 },
2569 		{ 0x1f, 0x0000 }
2570 	};
2571 
2572 	rtl_writephy(tp, 0x1f, 0x0000);
2573 	rtl_patchphy(tp, 0x14, 1 << 5);
2574 	rtl_patchphy(tp, 0x0d, 1 << 5);
2575 
2576 	rtl_writephy_batch(tp, phy_reg_init);
2577 }
2578 
2579 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2580 {
2581 	static const struct phy_reg phy_reg_init[] = {
2582 		{ 0x1f, 0x0001 },
2583 		{ 0x12, 0x2300 },
2584 		{ 0x1f, 0x0002 },
2585 		{ 0x00, 0x88d4 },
2586 		{ 0x01, 0x82b1 },
2587 		{ 0x03, 0x7002 },
2588 		{ 0x08, 0x9e30 },
2589 		{ 0x09, 0x01f0 },
2590 		{ 0x0a, 0x5500 },
2591 		{ 0x0c, 0x00c8 },
2592 		{ 0x1f, 0x0003 },
2593 		{ 0x12, 0xc096 },
2594 		{ 0x16, 0x000a },
2595 		{ 0x1f, 0x0000 },
2596 		{ 0x1f, 0x0000 },
2597 		{ 0x09, 0x2000 },
2598 		{ 0x09, 0x0000 }
2599 	};
2600 
2601 	rtl_writephy_batch(tp, phy_reg_init);
2602 
2603 	rtl_patchphy(tp, 0x14, 1 << 5);
2604 	rtl_patchphy(tp, 0x0d, 1 << 5);
2605 	rtl_writephy(tp, 0x1f, 0x0000);
2606 }
2607 
2608 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2609 {
2610 	static const struct phy_reg phy_reg_init[] = {
2611 		{ 0x1f, 0x0001 },
2612 		{ 0x12, 0x2300 },
2613 		{ 0x03, 0x802f },
2614 		{ 0x02, 0x4f02 },
2615 		{ 0x01, 0x0409 },
2616 		{ 0x00, 0xf099 },
2617 		{ 0x04, 0x9800 },
2618 		{ 0x04, 0x9000 },
2619 		{ 0x1d, 0x3d98 },
2620 		{ 0x1f, 0x0002 },
2621 		{ 0x0c, 0x7eb8 },
2622 		{ 0x06, 0x0761 },
2623 		{ 0x1f, 0x0003 },
2624 		{ 0x16, 0x0f0a },
2625 		{ 0x1f, 0x0000 }
2626 	};
2627 
2628 	rtl_writephy_batch(tp, phy_reg_init);
2629 
2630 	rtl_patchphy(tp, 0x16, 1 << 0);
2631 	rtl_patchphy(tp, 0x14, 1 << 5);
2632 	rtl_patchphy(tp, 0x0d, 1 << 5);
2633 	rtl_writephy(tp, 0x1f, 0x0000);
2634 }
2635 
2636 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2637 {
2638 	static const struct phy_reg phy_reg_init[] = {
2639 		{ 0x1f, 0x0001 },
2640 		{ 0x12, 0x2300 },
2641 		{ 0x1d, 0x3d98 },
2642 		{ 0x1f, 0x0002 },
2643 		{ 0x0c, 0x7eb8 },
2644 		{ 0x06, 0x5461 },
2645 		{ 0x1f, 0x0003 },
2646 		{ 0x16, 0x0f0a },
2647 		{ 0x1f, 0x0000 }
2648 	};
2649 
2650 	rtl_writephy_batch(tp, phy_reg_init);
2651 
2652 	rtl_patchphy(tp, 0x16, 1 << 0);
2653 	rtl_patchphy(tp, 0x14, 1 << 5);
2654 	rtl_patchphy(tp, 0x0d, 1 << 5);
2655 	rtl_writephy(tp, 0x1f, 0x0000);
2656 }
2657 
2658 static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
2659 {
2660 	rtl8168c_3_hw_phy_config(tp);
2661 }
2662 
2663 static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
2664 	/* Channel Estimation */
2665 	{ 0x1f, 0x0001 },
2666 	{ 0x06, 0x4064 },
2667 	{ 0x07, 0x2863 },
2668 	{ 0x08, 0x059c },
2669 	{ 0x09, 0x26b4 },
2670 	{ 0x0a, 0x6a19 },
2671 	{ 0x0b, 0xdcc8 },
2672 	{ 0x10, 0xf06d },
2673 	{ 0x14, 0x7f68 },
2674 	{ 0x18, 0x7fd9 },
2675 	{ 0x1c, 0xf0ff },
2676 	{ 0x1d, 0x3d9c },
2677 	{ 0x1f, 0x0003 },
2678 	{ 0x12, 0xf49f },
2679 	{ 0x13, 0x070b },
2680 	{ 0x1a, 0x05ad },
2681 	{ 0x14, 0x94c0 },
2682 
2683 	/*
2684 	 * Tx Error Issue
2685 	 * Enhance line driver power
2686 	 */
2687 	{ 0x1f, 0x0002 },
2688 	{ 0x06, 0x5561 },
2689 	{ 0x1f, 0x0005 },
2690 	{ 0x05, 0x8332 },
2691 	{ 0x06, 0x5561 },
2692 
2693 	/*
2694 	 * Can not link to 1Gbps with bad cable
2695 	 * Decrease SNR threshold form 21.07dB to 19.04dB
2696 	 */
2697 	{ 0x1f, 0x0001 },
2698 	{ 0x17, 0x0cc0 },
2699 
2700 	{ 0x1f, 0x0000 },
2701 	{ 0x0d, 0xf880 }
2702 };
2703 
2704 static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
2705 	{ 0x1f, 0x0002 },
2706 	{ 0x05, 0x669a },
2707 	{ 0x1f, 0x0005 },
2708 	{ 0x05, 0x8330 },
2709 	{ 0x06, 0x669a },
2710 	{ 0x1f, 0x0002 }
2711 };
2712 
2713 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2714 {
2715 	rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
2716 
2717 	/*
2718 	 * Rx Error Issue
2719 	 * Fine Tune Switching regulator parameter
2720 	 */
2721 	rtl_writephy(tp, 0x1f, 0x0002);
2722 	rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
2723 	rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
2724 
2725 	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2726 		int val;
2727 
2728 		rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
2729 
2730 		val = rtl_readphy(tp, 0x0d);
2731 
2732 		if ((val & 0x00ff) != 0x006c) {
2733 			static const u32 set[] = {
2734 				0x0065, 0x0066, 0x0067, 0x0068,
2735 				0x0069, 0x006a, 0x006b, 0x006c
2736 			};
2737 			int i;
2738 
2739 			rtl_writephy(tp, 0x1f, 0x0002);
2740 
2741 			val &= 0xff00;
2742 			for (i = 0; i < ARRAY_SIZE(set); i++)
2743 				rtl_writephy(tp, 0x0d, val | set[i]);
2744 		}
2745 	} else {
2746 		static const struct phy_reg phy_reg_init[] = {
2747 			{ 0x1f, 0x0002 },
2748 			{ 0x05, 0x6662 },
2749 			{ 0x1f, 0x0005 },
2750 			{ 0x05, 0x8330 },
2751 			{ 0x06, 0x6662 }
2752 		};
2753 
2754 		rtl_writephy_batch(tp, phy_reg_init);
2755 	}
2756 
2757 	/* RSET couple improve */
2758 	rtl_writephy(tp, 0x1f, 0x0002);
2759 	rtl_patchphy(tp, 0x0d, 0x0300);
2760 	rtl_patchphy(tp, 0x0f, 0x0010);
2761 
2762 	/* Fine tune PLL performance */
2763 	rtl_writephy(tp, 0x1f, 0x0002);
2764 	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
2765 	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
2766 
2767 	rtl_writephy(tp, 0x1f, 0x0005);
2768 	rtl_writephy(tp, 0x05, 0x001b);
2769 
2770 	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2771 
2772 	rtl_writephy(tp, 0x1f, 0x0000);
2773 }
2774 
2775 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2776 {
2777 	rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
2778 
2779 	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2780 		int val;
2781 
2782 		rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
2783 
2784 		val = rtl_readphy(tp, 0x0d);
2785 		if ((val & 0x00ff) != 0x006c) {
2786 			static const u32 set[] = {
2787 				0x0065, 0x0066, 0x0067, 0x0068,
2788 				0x0069, 0x006a, 0x006b, 0x006c
2789 			};
2790 			int i;
2791 
2792 			rtl_writephy(tp, 0x1f, 0x0002);
2793 
2794 			val &= 0xff00;
2795 			for (i = 0; i < ARRAY_SIZE(set); i++)
2796 				rtl_writephy(tp, 0x0d, val | set[i]);
2797 		}
2798 	} else {
2799 		static const struct phy_reg phy_reg_init[] = {
2800 			{ 0x1f, 0x0002 },
2801 			{ 0x05, 0x2642 },
2802 			{ 0x1f, 0x0005 },
2803 			{ 0x05, 0x8330 },
2804 			{ 0x06, 0x2642 }
2805 		};
2806 
2807 		rtl_writephy_batch(tp, phy_reg_init);
2808 	}
2809 
2810 	/* Fine tune PLL performance */
2811 	rtl_writephy(tp, 0x1f, 0x0002);
2812 	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
2813 	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
2814 
2815 	/* Switching regulator Slew rate */
2816 	rtl_writephy(tp, 0x1f, 0x0002);
2817 	rtl_patchphy(tp, 0x0f, 0x0017);
2818 
2819 	rtl_writephy(tp, 0x1f, 0x0005);
2820 	rtl_writephy(tp, 0x05, 0x001b);
2821 
2822 	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2823 
2824 	rtl_writephy(tp, 0x1f, 0x0000);
2825 }
2826 
2827 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2828 {
2829 	static const struct phy_reg phy_reg_init[] = {
2830 		{ 0x1f, 0x0002 },
2831 		{ 0x10, 0x0008 },
2832 		{ 0x0d, 0x006c },
2833 
2834 		{ 0x1f, 0x0000 },
2835 		{ 0x0d, 0xf880 },
2836 
2837 		{ 0x1f, 0x0001 },
2838 		{ 0x17, 0x0cc0 },
2839 
2840 		{ 0x1f, 0x0001 },
2841 		{ 0x0b, 0xa4d8 },
2842 		{ 0x09, 0x281c },
2843 		{ 0x07, 0x2883 },
2844 		{ 0x0a, 0x6b35 },
2845 		{ 0x1d, 0x3da4 },
2846 		{ 0x1c, 0xeffd },
2847 		{ 0x14, 0x7f52 },
2848 		{ 0x18, 0x7fc6 },
2849 		{ 0x08, 0x0601 },
2850 		{ 0x06, 0x4063 },
2851 		{ 0x10, 0xf074 },
2852 		{ 0x1f, 0x0003 },
2853 		{ 0x13, 0x0789 },
2854 		{ 0x12, 0xf4bd },
2855 		{ 0x1a, 0x04fd },
2856 		{ 0x14, 0x84b0 },
2857 		{ 0x1f, 0x0000 },
2858 		{ 0x00, 0x9200 },
2859 
2860 		{ 0x1f, 0x0005 },
2861 		{ 0x01, 0x0340 },
2862 		{ 0x1f, 0x0001 },
2863 		{ 0x04, 0x4000 },
2864 		{ 0x03, 0x1d21 },
2865 		{ 0x02, 0x0c32 },
2866 		{ 0x01, 0x0200 },
2867 		{ 0x00, 0x5554 },
2868 		{ 0x04, 0x4800 },
2869 		{ 0x04, 0x4000 },
2870 		{ 0x04, 0xf000 },
2871 		{ 0x03, 0xdf01 },
2872 		{ 0x02, 0xdf20 },
2873 		{ 0x01, 0x101a },
2874 		{ 0x00, 0xa0ff },
2875 		{ 0x04, 0xf800 },
2876 		{ 0x04, 0xf000 },
2877 		{ 0x1f, 0x0000 },
2878 
2879 		{ 0x1f, 0x0007 },
2880 		{ 0x1e, 0x0023 },
2881 		{ 0x16, 0x0000 },
2882 		{ 0x1f, 0x0000 }
2883 	};
2884 
2885 	rtl_writephy_batch(tp, phy_reg_init);
2886 }
2887 
2888 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2889 {
2890 	static const struct phy_reg phy_reg_init[] = {
2891 		{ 0x1f, 0x0001 },
2892 		{ 0x17, 0x0cc0 },
2893 
2894 		{ 0x1f, 0x0007 },
2895 		{ 0x1e, 0x002d },
2896 		{ 0x18, 0x0040 },
2897 		{ 0x1f, 0x0000 }
2898 	};
2899 
2900 	rtl_writephy_batch(tp, phy_reg_init);
2901 	rtl_patchphy(tp, 0x0d, 1 << 5);
2902 }
2903 
2904 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
2905 {
2906 	static const struct phy_reg phy_reg_init[] = {
2907 		/* Enable Delay cap */
2908 		{ 0x1f, 0x0005 },
2909 		{ 0x05, 0x8b80 },
2910 		{ 0x06, 0xc896 },
2911 		{ 0x1f, 0x0000 },
2912 
2913 		/* Channel estimation fine tune */
2914 		{ 0x1f, 0x0001 },
2915 		{ 0x0b, 0x6c20 },
2916 		{ 0x07, 0x2872 },
2917 		{ 0x1c, 0xefff },
2918 		{ 0x1f, 0x0003 },
2919 		{ 0x14, 0x6420 },
2920 		{ 0x1f, 0x0000 },
2921 
2922 		/* Update PFM & 10M TX idle timer */
2923 		{ 0x1f, 0x0007 },
2924 		{ 0x1e, 0x002f },
2925 		{ 0x15, 0x1919 },
2926 		{ 0x1f, 0x0000 },
2927 
2928 		{ 0x1f, 0x0007 },
2929 		{ 0x1e, 0x00ac },
2930 		{ 0x18, 0x0006 },
2931 		{ 0x1f, 0x0000 }
2932 	};
2933 
2934 	rtl_apply_firmware(tp);
2935 
2936 	rtl_writephy_batch(tp, phy_reg_init);
2937 
2938 	/* DCO enable for 10M IDLE Power */
2939 	rtl_writephy(tp, 0x1f, 0x0007);
2940 	rtl_writephy(tp, 0x1e, 0x0023);
2941 	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
2942 	rtl_writephy(tp, 0x1f, 0x0000);
2943 
2944 	/* For impedance matching */
2945 	rtl_writephy(tp, 0x1f, 0x0002);
2946 	rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
2947 	rtl_writephy(tp, 0x1f, 0x0000);
2948 
2949 	/* PHY auto speed down */
2950 	rtl_writephy(tp, 0x1f, 0x0007);
2951 	rtl_writephy(tp, 0x1e, 0x002d);
2952 	rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
2953 	rtl_writephy(tp, 0x1f, 0x0000);
2954 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
2955 
2956 	rtl_writephy(tp, 0x1f, 0x0005);
2957 	rtl_writephy(tp, 0x05, 0x8b86);
2958 	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
2959 	rtl_writephy(tp, 0x1f, 0x0000);
2960 
2961 	rtl_writephy(tp, 0x1f, 0x0005);
2962 	rtl_writephy(tp, 0x05, 0x8b85);
2963 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
2964 	rtl_writephy(tp, 0x1f, 0x0007);
2965 	rtl_writephy(tp, 0x1e, 0x0020);
2966 	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
2967 	rtl_writephy(tp, 0x1f, 0x0006);
2968 	rtl_writephy(tp, 0x00, 0x5a00);
2969 	rtl_writephy(tp, 0x1f, 0x0000);
2970 	rtl_writephy(tp, 0x0d, 0x0007);
2971 	rtl_writephy(tp, 0x0e, 0x003c);
2972 	rtl_writephy(tp, 0x0d, 0x4007);
2973 	rtl_writephy(tp, 0x0e, 0x0000);
2974 	rtl_writephy(tp, 0x0d, 0x0000);
2975 }
2976 
2977 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
2978 {
2979 	const u16 w[] = {
2980 		addr[0] | (addr[1] << 8),
2981 		addr[2] | (addr[3] << 8),
2982 		addr[4] | (addr[5] << 8)
2983 	};
2984 
2985 	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
2986 	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
2987 	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
2988 	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
2989 }
2990 
2991 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
2992 {
2993 	static const struct phy_reg phy_reg_init[] = {
2994 		/* Enable Delay cap */
2995 		{ 0x1f, 0x0004 },
2996 		{ 0x1f, 0x0007 },
2997 		{ 0x1e, 0x00ac },
2998 		{ 0x18, 0x0006 },
2999 		{ 0x1f, 0x0002 },
3000 		{ 0x1f, 0x0000 },
3001 		{ 0x1f, 0x0000 },
3002 
3003 		/* Channel estimation fine tune */
3004 		{ 0x1f, 0x0003 },
3005 		{ 0x09, 0xa20f },
3006 		{ 0x1f, 0x0000 },
3007 		{ 0x1f, 0x0000 },
3008 
3009 		/* Green Setting */
3010 		{ 0x1f, 0x0005 },
3011 		{ 0x05, 0x8b5b },
3012 		{ 0x06, 0x9222 },
3013 		{ 0x05, 0x8b6d },
3014 		{ 0x06, 0x8000 },
3015 		{ 0x05, 0x8b76 },
3016 		{ 0x06, 0x8000 },
3017 		{ 0x1f, 0x0000 }
3018 	};
3019 
3020 	rtl_apply_firmware(tp);
3021 
3022 	rtl_writephy_batch(tp, phy_reg_init);
3023 
3024 	/* For 4-corner performance improve */
3025 	rtl_writephy(tp, 0x1f, 0x0005);
3026 	rtl_writephy(tp, 0x05, 0x8b80);
3027 	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
3028 	rtl_writephy(tp, 0x1f, 0x0000);
3029 
3030 	/* PHY auto speed down */
3031 	rtl_writephy(tp, 0x1f, 0x0004);
3032 	rtl_writephy(tp, 0x1f, 0x0007);
3033 	rtl_writephy(tp, 0x1e, 0x002d);
3034 	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
3035 	rtl_writephy(tp, 0x1f, 0x0002);
3036 	rtl_writephy(tp, 0x1f, 0x0000);
3037 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3038 
3039 	/* improve 10M EEE waveform */
3040 	rtl_writephy(tp, 0x1f, 0x0005);
3041 	rtl_writephy(tp, 0x05, 0x8b86);
3042 	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
3043 	rtl_writephy(tp, 0x1f, 0x0000);
3044 
3045 	/* Improve 2-pair detection performance */
3046 	rtl_writephy(tp, 0x1f, 0x0005);
3047 	rtl_writephy(tp, 0x05, 0x8b85);
3048 	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3049 	rtl_writephy(tp, 0x1f, 0x0000);
3050 
3051 	rtl8168f_config_eee_phy(tp);
3052 	rtl_enable_eee(tp);
3053 
3054 	/* Green feature */
3055 	rtl_writephy(tp, 0x1f, 0x0003);
3056 	rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
3057 	rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
3058 	rtl_writephy(tp, 0x1f, 0x0000);
3059 	rtl_writephy(tp, 0x1f, 0x0005);
3060 	rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
3061 	rtl_writephy(tp, 0x1f, 0x0000);
3062 
3063 	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3064 	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
3065 }
3066 
3067 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3068 {
3069 	/* For 4-corner performance improve */
3070 	rtl_writephy(tp, 0x1f, 0x0005);
3071 	rtl_writephy(tp, 0x05, 0x8b80);
3072 	rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
3073 	rtl_writephy(tp, 0x1f, 0x0000);
3074 
3075 	/* PHY auto speed down */
3076 	rtl_writephy(tp, 0x1f, 0x0007);
3077 	rtl_writephy(tp, 0x1e, 0x002d);
3078 	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
3079 	rtl_writephy(tp, 0x1f, 0x0000);
3080 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3081 
3082 	/* Improve 10M EEE waveform */
3083 	rtl_writephy(tp, 0x1f, 0x0005);
3084 	rtl_writephy(tp, 0x05, 0x8b86);
3085 	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
3086 	rtl_writephy(tp, 0x1f, 0x0000);
3087 
3088 	rtl8168f_config_eee_phy(tp);
3089 	rtl_enable_eee(tp);
3090 }
3091 
3092 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3093 {
3094 	static const struct phy_reg phy_reg_init[] = {
3095 		/* Channel estimation fine tune */
3096 		{ 0x1f, 0x0003 },
3097 		{ 0x09, 0xa20f },
3098 		{ 0x1f, 0x0000 },
3099 
3100 		/* Modify green table for giga & fnet */
3101 		{ 0x1f, 0x0005 },
3102 		{ 0x05, 0x8b55 },
3103 		{ 0x06, 0x0000 },
3104 		{ 0x05, 0x8b5e },
3105 		{ 0x06, 0x0000 },
3106 		{ 0x05, 0x8b67 },
3107 		{ 0x06, 0x0000 },
3108 		{ 0x05, 0x8b70 },
3109 		{ 0x06, 0x0000 },
3110 		{ 0x1f, 0x0000 },
3111 		{ 0x1f, 0x0007 },
3112 		{ 0x1e, 0x0078 },
3113 		{ 0x17, 0x0000 },
3114 		{ 0x19, 0x00fb },
3115 		{ 0x1f, 0x0000 },
3116 
3117 		/* Modify green table for 10M */
3118 		{ 0x1f, 0x0005 },
3119 		{ 0x05, 0x8b79 },
3120 		{ 0x06, 0xaa00 },
3121 		{ 0x1f, 0x0000 },
3122 
3123 		/* Disable hiimpedance detection (RTCT) */
3124 		{ 0x1f, 0x0003 },
3125 		{ 0x01, 0x328a },
3126 		{ 0x1f, 0x0000 }
3127 	};
3128 
3129 	rtl_apply_firmware(tp);
3130 
3131 	rtl_writephy_batch(tp, phy_reg_init);
3132 
3133 	rtl8168f_hw_phy_config(tp);
3134 
3135 	/* Improve 2-pair detection performance */
3136 	rtl_writephy(tp, 0x1f, 0x0005);
3137 	rtl_writephy(tp, 0x05, 0x8b85);
3138 	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3139 	rtl_writephy(tp, 0x1f, 0x0000);
3140 }
3141 
3142 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3143 {
3144 	rtl_apply_firmware(tp);
3145 
3146 	rtl8168f_hw_phy_config(tp);
3147 }
3148 
3149 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3150 {
3151 	static const struct phy_reg phy_reg_init[] = {
3152 		/* Channel estimation fine tune */
3153 		{ 0x1f, 0x0003 },
3154 		{ 0x09, 0xa20f },
3155 		{ 0x1f, 0x0000 },
3156 
3157 		/* Modify green table for giga & fnet */
3158 		{ 0x1f, 0x0005 },
3159 		{ 0x05, 0x8b55 },
3160 		{ 0x06, 0x0000 },
3161 		{ 0x05, 0x8b5e },
3162 		{ 0x06, 0x0000 },
3163 		{ 0x05, 0x8b67 },
3164 		{ 0x06, 0x0000 },
3165 		{ 0x05, 0x8b70 },
3166 		{ 0x06, 0x0000 },
3167 		{ 0x1f, 0x0000 },
3168 		{ 0x1f, 0x0007 },
3169 		{ 0x1e, 0x0078 },
3170 		{ 0x17, 0x0000 },
3171 		{ 0x19, 0x00aa },
3172 		{ 0x1f, 0x0000 },
3173 
3174 		/* Modify green table for 10M */
3175 		{ 0x1f, 0x0005 },
3176 		{ 0x05, 0x8b79 },
3177 		{ 0x06, 0xaa00 },
3178 		{ 0x1f, 0x0000 },
3179 
3180 		/* Disable hiimpedance detection (RTCT) */
3181 		{ 0x1f, 0x0003 },
3182 		{ 0x01, 0x328a },
3183 		{ 0x1f, 0x0000 }
3184 	};
3185 
3186 
3187 	rtl_apply_firmware(tp);
3188 
3189 	rtl8168f_hw_phy_config(tp);
3190 
3191 	/* Improve 2-pair detection performance */
3192 	rtl_writephy(tp, 0x1f, 0x0005);
3193 	rtl_writephy(tp, 0x05, 0x8b85);
3194 	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3195 	rtl_writephy(tp, 0x1f, 0x0000);
3196 
3197 	rtl_writephy_batch(tp, phy_reg_init);
3198 
3199 	/* Modify green table for giga */
3200 	rtl_writephy(tp, 0x1f, 0x0005);
3201 	rtl_writephy(tp, 0x05, 0x8b54);
3202 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
3203 	rtl_writephy(tp, 0x05, 0x8b5d);
3204 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
3205 	rtl_writephy(tp, 0x05, 0x8a7c);
3206 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3207 	rtl_writephy(tp, 0x05, 0x8a7f);
3208 	rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
3209 	rtl_writephy(tp, 0x05, 0x8a82);
3210 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3211 	rtl_writephy(tp, 0x05, 0x8a85);
3212 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3213 	rtl_writephy(tp, 0x05, 0x8a88);
3214 	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3215 	rtl_writephy(tp, 0x1f, 0x0000);
3216 
3217 	/* uc same-seed solution */
3218 	rtl_writephy(tp, 0x1f, 0x0005);
3219 	rtl_writephy(tp, 0x05, 0x8b85);
3220 	rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
3221 	rtl_writephy(tp, 0x1f, 0x0000);
3222 
3223 	/* Green feature */
3224 	rtl_writephy(tp, 0x1f, 0x0003);
3225 	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
3226 	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
3227 	rtl_writephy(tp, 0x1f, 0x0000);
3228 }
3229 
3230 static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
3231 {
3232 	phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
3233 }
3234 
3235 static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
3236 {
3237 	struct phy_device *phydev = tp->phydev;
3238 
3239 	phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
3240 	phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
3241 	phy_write(phydev, 0x1f, 0x0a43);
3242 	phy_write(phydev, 0x13, 0x8084);
3243 	phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
3244 	phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
3245 
3246 	phy_write(phydev, 0x1f, 0x0000);
3247 }
3248 
3249 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3250 {
3251 	int ret;
3252 
3253 	rtl_apply_firmware(tp);
3254 
3255 	ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
3256 	if (ret & BIT(8))
3257 		phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
3258 	else
3259 		phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
3260 
3261 	ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
3262 	if (ret & BIT(8))
3263 		phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
3264 	else
3265 		phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
3266 
3267 	/* Enable PHY auto speed down */
3268 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
3269 
3270 	rtl8168g_phy_adjust_10m_aldps(tp);
3271 
3272 	/* EEE auto-fallback function */
3273 	phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
3274 
3275 	/* Enable UC LPF tune function */
3276 	rtl_writephy(tp, 0x1f, 0x0a43);
3277 	rtl_writephy(tp, 0x13, 0x8012);
3278 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3279 
3280 	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
3281 
3282 	/* Improve SWR Efficiency */
3283 	rtl_writephy(tp, 0x1f, 0x0bcd);
3284 	rtl_writephy(tp, 0x14, 0x5065);
3285 	rtl_writephy(tp, 0x14, 0xd065);
3286 	rtl_writephy(tp, 0x1f, 0x0bc8);
3287 	rtl_writephy(tp, 0x11, 0x5655);
3288 	rtl_writephy(tp, 0x1f, 0x0bcd);
3289 	rtl_writephy(tp, 0x14, 0x1065);
3290 	rtl_writephy(tp, 0x14, 0x9065);
3291 	rtl_writephy(tp, 0x14, 0x1065);
3292 	rtl_writephy(tp, 0x1f, 0x0000);
3293 
3294 	rtl8168g_disable_aldps(tp);
3295 	rtl8168g_config_eee_phy(tp);
3296 	rtl_enable_eee(tp);
3297 }
3298 
3299 static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
3300 {
3301 	rtl_apply_firmware(tp);
3302 	rtl8168g_config_eee_phy(tp);
3303 	rtl_enable_eee(tp);
3304 }
3305 
3306 static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
3307 {
3308 	u16 dout_tapbin;
3309 	u32 data;
3310 
3311 	rtl_apply_firmware(tp);
3312 
3313 	/* CHN EST parameters adjust - giga master */
3314 	rtl_writephy(tp, 0x1f, 0x0a43);
3315 	rtl_writephy(tp, 0x13, 0x809b);
3316 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
3317 	rtl_writephy(tp, 0x13, 0x80a2);
3318 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
3319 	rtl_writephy(tp, 0x13, 0x80a4);
3320 	rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
3321 	rtl_writephy(tp, 0x13, 0x809c);
3322 	rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
3323 	rtl_writephy(tp, 0x1f, 0x0000);
3324 
3325 	/* CHN EST parameters adjust - giga slave */
3326 	rtl_writephy(tp, 0x1f, 0x0a43);
3327 	rtl_writephy(tp, 0x13, 0x80ad);
3328 	rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
3329 	rtl_writephy(tp, 0x13, 0x80b4);
3330 	rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
3331 	rtl_writephy(tp, 0x13, 0x80ac);
3332 	rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
3333 	rtl_writephy(tp, 0x1f, 0x0000);
3334 
3335 	/* CHN EST parameters adjust - fnet */
3336 	rtl_writephy(tp, 0x1f, 0x0a43);
3337 	rtl_writephy(tp, 0x13, 0x808e);
3338 	rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
3339 	rtl_writephy(tp, 0x13, 0x8090);
3340 	rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
3341 	rtl_writephy(tp, 0x13, 0x8092);
3342 	rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
3343 	rtl_writephy(tp, 0x1f, 0x0000);
3344 
3345 	/* enable R-tune & PGA-retune function */
3346 	dout_tapbin = 0;
3347 	rtl_writephy(tp, 0x1f, 0x0a46);
3348 	data = rtl_readphy(tp, 0x13);
3349 	data &= 3;
3350 	data <<= 2;
3351 	dout_tapbin |= data;
3352 	data = rtl_readphy(tp, 0x12);
3353 	data &= 0xc000;
3354 	data >>= 14;
3355 	dout_tapbin |= data;
3356 	dout_tapbin = ~(dout_tapbin^0x08);
3357 	dout_tapbin <<= 12;
3358 	dout_tapbin &= 0xf000;
3359 	rtl_writephy(tp, 0x1f, 0x0a43);
3360 	rtl_writephy(tp, 0x13, 0x827a);
3361 	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3362 	rtl_writephy(tp, 0x13, 0x827b);
3363 	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3364 	rtl_writephy(tp, 0x13, 0x827c);
3365 	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3366 	rtl_writephy(tp, 0x13, 0x827d);
3367 	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3368 
3369 	rtl_writephy(tp, 0x1f, 0x0a43);
3370 	rtl_writephy(tp, 0x13, 0x0811);
3371 	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
3372 	rtl_writephy(tp, 0x1f, 0x0a42);
3373 	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
3374 	rtl_writephy(tp, 0x1f, 0x0000);
3375 
3376 	/* enable GPHY 10M */
3377 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
3378 
3379 	/* SAR ADC performance */
3380 	phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
3381 
3382 	rtl_writephy(tp, 0x1f, 0x0a43);
3383 	rtl_writephy(tp, 0x13, 0x803f);
3384 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3385 	rtl_writephy(tp, 0x13, 0x8047);
3386 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3387 	rtl_writephy(tp, 0x13, 0x804f);
3388 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3389 	rtl_writephy(tp, 0x13, 0x8057);
3390 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3391 	rtl_writephy(tp, 0x13, 0x805f);
3392 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3393 	rtl_writephy(tp, 0x13, 0x8067);
3394 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3395 	rtl_writephy(tp, 0x13, 0x806f);
3396 	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3397 	rtl_writephy(tp, 0x1f, 0x0000);
3398 
3399 	/* disable phy pfm mode */
3400 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
3401 
3402 	rtl8168g_disable_aldps(tp);
3403 	rtl8168g_config_eee_phy(tp);
3404 	rtl_enable_eee(tp);
3405 }
3406 
3407 static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3408 {
3409 	u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
3410 	u16 rlen;
3411 	u32 data;
3412 
3413 	rtl_apply_firmware(tp);
3414 
3415 	/* CHIN EST parameter update */
3416 	rtl_writephy(tp, 0x1f, 0x0a43);
3417 	rtl_writephy(tp, 0x13, 0x808a);
3418 	rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
3419 	rtl_writephy(tp, 0x1f, 0x0000);
3420 
3421 	/* enable R-tune & PGA-retune function */
3422 	rtl_writephy(tp, 0x1f, 0x0a43);
3423 	rtl_writephy(tp, 0x13, 0x0811);
3424 	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
3425 	rtl_writephy(tp, 0x1f, 0x0a42);
3426 	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
3427 	rtl_writephy(tp, 0x1f, 0x0000);
3428 
3429 	/* enable GPHY 10M */
3430 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
3431 
3432 	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
3433 	data = r8168_mac_ocp_read(tp, 0xdd02);
3434 	ioffset_p3 = ((data & 0x80)>>7);
3435 	ioffset_p3 <<= 3;
3436 
3437 	data = r8168_mac_ocp_read(tp, 0xdd00);
3438 	ioffset_p3 |= ((data & (0xe000))>>13);
3439 	ioffset_p2 = ((data & (0x1e00))>>9);
3440 	ioffset_p1 = ((data & (0x01e0))>>5);
3441 	ioffset_p0 = ((data & 0x0010)>>4);
3442 	ioffset_p0 <<= 3;
3443 	ioffset_p0 |= (data & (0x07));
3444 	data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
3445 
3446 	if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
3447 	    (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
3448 		rtl_writephy(tp, 0x1f, 0x0bcf);
3449 		rtl_writephy(tp, 0x16, data);
3450 		rtl_writephy(tp, 0x1f, 0x0000);
3451 	}
3452 
3453 	/* Modify rlen (TX LPF corner frequency) level */
3454 	rtl_writephy(tp, 0x1f, 0x0bcd);
3455 	data = rtl_readphy(tp, 0x16);
3456 	data &= 0x000f;
3457 	rlen = 0;
3458 	if (data > 3)
3459 		rlen = data - 3;
3460 	data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
3461 	rtl_writephy(tp, 0x17, data);
3462 	rtl_writephy(tp, 0x1f, 0x0bcd);
3463 	rtl_writephy(tp, 0x1f, 0x0000);
3464 
3465 	/* disable phy pfm mode */
3466 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
3467 
3468 	rtl8168g_disable_aldps(tp);
3469 	rtl8168g_config_eee_phy(tp);
3470 	rtl_enable_eee(tp);
3471 }
3472 
3473 static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
3474 {
3475 	/* Enable PHY auto speed down */
3476 	phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
3477 
3478 	rtl8168g_phy_adjust_10m_aldps(tp);
3479 
3480 	/* Enable EEE auto-fallback function */
3481 	phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
3482 
3483 	/* Enable UC LPF tune function */
3484 	rtl_writephy(tp, 0x1f, 0x0a43);
3485 	rtl_writephy(tp, 0x13, 0x8012);
3486 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3487 	rtl_writephy(tp, 0x1f, 0x0000);
3488 
3489 	/* set rg_sel_sdm_rate */
3490 	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
3491 
3492 	rtl8168g_disable_aldps(tp);
3493 	rtl8168g_config_eee_phy(tp);
3494 	rtl_enable_eee(tp);
3495 }
3496 
3497 static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
3498 {
3499 	rtl8168g_phy_adjust_10m_aldps(tp);
3500 
3501 	/* Enable UC LPF tune function */
3502 	rtl_writephy(tp, 0x1f, 0x0a43);
3503 	rtl_writephy(tp, 0x13, 0x8012);
3504 	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3505 	rtl_writephy(tp, 0x1f, 0x0000);
3506 
3507 	/* Set rg_sel_sdm_rate */
3508 	phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
3509 
3510 	/* Channel estimation parameters */
3511 	rtl_writephy(tp, 0x1f, 0x0a43);
3512 	rtl_writephy(tp, 0x13, 0x80f3);
3513 	rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
3514 	rtl_writephy(tp, 0x13, 0x80f0);
3515 	rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
3516 	rtl_writephy(tp, 0x13, 0x80ef);
3517 	rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
3518 	rtl_writephy(tp, 0x13, 0x80f6);
3519 	rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
3520 	rtl_writephy(tp, 0x13, 0x80ec);
3521 	rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
3522 	rtl_writephy(tp, 0x13, 0x80ed);
3523 	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
3524 	rtl_writephy(tp, 0x13, 0x80f2);
3525 	rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
3526 	rtl_writephy(tp, 0x13, 0x80f4);
3527 	rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
3528 	rtl_writephy(tp, 0x1f, 0x0a43);
3529 	rtl_writephy(tp, 0x13, 0x8110);
3530 	rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
3531 	rtl_writephy(tp, 0x13, 0x810f);
3532 	rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
3533 	rtl_writephy(tp, 0x13, 0x8111);
3534 	rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
3535 	rtl_writephy(tp, 0x13, 0x8113);
3536 	rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
3537 	rtl_writephy(tp, 0x13, 0x8115);
3538 	rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
3539 	rtl_writephy(tp, 0x13, 0x810e);
3540 	rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
3541 	rtl_writephy(tp, 0x13, 0x810c);
3542 	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
3543 	rtl_writephy(tp, 0x13, 0x810b);
3544 	rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
3545 	rtl_writephy(tp, 0x1f, 0x0a43);
3546 	rtl_writephy(tp, 0x13, 0x80d1);
3547 	rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
3548 	rtl_writephy(tp, 0x13, 0x80cd);
3549 	rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
3550 	rtl_writephy(tp, 0x13, 0x80d3);
3551 	rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
3552 	rtl_writephy(tp, 0x13, 0x80d5);
3553 	rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
3554 	rtl_writephy(tp, 0x13, 0x80d7);
3555 	rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);
3556 
3557 	/* Force PWM-mode */
3558 	rtl_writephy(tp, 0x1f, 0x0bcd);
3559 	rtl_writephy(tp, 0x14, 0x5065);
3560 	rtl_writephy(tp, 0x14, 0xd065);
3561 	rtl_writephy(tp, 0x1f, 0x0bc8);
3562 	rtl_writephy(tp, 0x12, 0x00ed);
3563 	rtl_writephy(tp, 0x1f, 0x0bcd);
3564 	rtl_writephy(tp, 0x14, 0x1065);
3565 	rtl_writephy(tp, 0x14, 0x9065);
3566 	rtl_writephy(tp, 0x14, 0x1065);
3567 	rtl_writephy(tp, 0x1f, 0x0000);
3568 
3569 	rtl8168g_disable_aldps(tp);
3570 	rtl8168g_config_eee_phy(tp);
3571 	rtl_enable_eee(tp);
3572 }
3573 
3574 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3575 {
3576 	static const struct phy_reg phy_reg_init[] = {
3577 		{ 0x1f, 0x0003 },
3578 		{ 0x08, 0x441d },
3579 		{ 0x01, 0x9100 },
3580 		{ 0x1f, 0x0000 }
3581 	};
3582 
3583 	rtl_writephy(tp, 0x1f, 0x0000);
3584 	rtl_patchphy(tp, 0x11, 1 << 12);
3585 	rtl_patchphy(tp, 0x19, 1 << 13);
3586 	rtl_patchphy(tp, 0x10, 1 << 15);
3587 
3588 	rtl_writephy_batch(tp, phy_reg_init);
3589 }
3590 
3591 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3592 {
3593 	static const struct phy_reg phy_reg_init[] = {
3594 		{ 0x1f, 0x0005 },
3595 		{ 0x1a, 0x0000 },
3596 		{ 0x1f, 0x0000 },
3597 
3598 		{ 0x1f, 0x0004 },
3599 		{ 0x1c, 0x0000 },
3600 		{ 0x1f, 0x0000 },
3601 
3602 		{ 0x1f, 0x0001 },
3603 		{ 0x15, 0x7701 },
3604 		{ 0x1f, 0x0000 }
3605 	};
3606 
3607 	/* Disable ALDPS before ram code */
3608 	rtl_writephy(tp, 0x1f, 0x0000);
3609 	rtl_writephy(tp, 0x18, 0x0310);
3610 	msleep(100);
3611 
3612 	rtl_apply_firmware(tp);
3613 
3614 	rtl_writephy_batch(tp, phy_reg_init);
3615 }
3616 
3617 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3618 {
3619 	/* Disable ALDPS before setting firmware */
3620 	rtl_writephy(tp, 0x1f, 0x0000);
3621 	rtl_writephy(tp, 0x18, 0x0310);
3622 	msleep(20);
3623 
3624 	rtl_apply_firmware(tp);
3625 
3626 	/* EEE setting */
3627 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3628 	rtl_writephy(tp, 0x1f, 0x0004);
3629 	rtl_writephy(tp, 0x10, 0x401f);
3630 	rtl_writephy(tp, 0x19, 0x7030);
3631 	rtl_writephy(tp, 0x1f, 0x0000);
3632 }
3633 
3634 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3635 {
3636 	static const struct phy_reg phy_reg_init[] = {
3637 		{ 0x1f, 0x0004 },
3638 		{ 0x10, 0xc07f },
3639 		{ 0x19, 0x7030 },
3640 		{ 0x1f, 0x0000 }
3641 	};
3642 
3643 	/* Disable ALDPS before ram code */
3644 	rtl_writephy(tp, 0x1f, 0x0000);
3645 	rtl_writephy(tp, 0x18, 0x0310);
3646 	msleep(100);
3647 
3648 	rtl_apply_firmware(tp);
3649 
3650 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3651 	rtl_writephy_batch(tp, phy_reg_init);
3652 
3653 	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
3654 }
3655 
3656 static void rtl_hw_phy_config(struct net_device *dev)
3657 {
3658 	static const rtl_generic_fct phy_configs[] = {
3659 		/* PCI devices. */
3660 		[RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
3661 		[RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
3662 		[RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
3663 		[RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
3664 		[RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
3665 		/* PCI-E devices. */
3666 		[RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
3667 		[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
3668 		[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
3669 		[RTL_GIGA_MAC_VER_10] = NULL,
3670 		[RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
3671 		[RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
3672 		[RTL_GIGA_MAC_VER_13] = NULL,
3673 		[RTL_GIGA_MAC_VER_14] = NULL,
3674 		[RTL_GIGA_MAC_VER_15] = NULL,
3675 		[RTL_GIGA_MAC_VER_16] = NULL,
3676 		[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
3677 		[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
3678 		[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
3679 		[RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
3680 		[RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
3681 		[RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
3682 		[RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
3683 		[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
3684 		[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
3685 		[RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
3686 		[RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
3687 		[RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
3688 		[RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
3689 		[RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
3690 		[RTL_GIGA_MAC_VER_31] = NULL,
3691 		[RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
3692 		[RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
3693 		[RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
3694 		[RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
3695 		[RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
3696 		[RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
3697 		[RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
3698 		[RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
3699 		[RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
3700 		[RTL_GIGA_MAC_VER_41] = NULL,
3701 		[RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
3702 		[RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
3703 		[RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
3704 		[RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
3705 		[RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
3706 		[RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
3707 		[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
3708 		[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
3709 		[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
3710 		[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
3711 	};
3712 	struct rtl8169_private *tp = netdev_priv(dev);
3713 
3714 	if (phy_configs[tp->mac_version])
3715 		phy_configs[tp->mac_version](tp);
3716 }
3717 
3718 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3719 {
3720 	if (!test_and_set_bit(flag, tp->wk.flags))
3721 		schedule_work(&tp->wk.work);
3722 }
3723 
3724 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3725 {
3726 	rtl_hw_phy_config(dev);
3727 
3728 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3729 		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3730 		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3731 		netif_dbg(tp, drv, dev,
3732 			  "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3733 		RTL_W8(tp, 0x82, 0x01);
3734 	}
3735 
3736 	/* We may have called phy_speed_down before */
3737 	phy_speed_up(tp->phydev);
3738 
3739 	genphy_soft_reset(tp->phydev);
3740 }
3741 
3742 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3743 {
3744 	rtl_lock_work(tp);
3745 
3746 	rtl_unlock_config_regs(tp);
3747 
3748 	RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
3749 	RTL_R32(tp, MAC4);
3750 
3751 	RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3752 	RTL_R32(tp, MAC0);
3753 
3754 	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3755 		rtl_rar_exgmac_set(tp, addr);
3756 
3757 	rtl_lock_config_regs(tp);
3758 
3759 	rtl_unlock_work(tp);
3760 }
3761 
3762 static int rtl_set_mac_address(struct net_device *dev, void *p)
3763 {
3764 	struct rtl8169_private *tp = netdev_priv(dev);
3765 	struct device *d = tp_to_dev(tp);
3766 	int ret;
3767 
3768 	ret = eth_mac_addr(dev, p);
3769 	if (ret)
3770 		return ret;
3771 
3772 	pm_runtime_get_noresume(d);
3773 
3774 	if (pm_runtime_active(d))
3775 		rtl_rar_set(tp, dev->dev_addr);
3776 
3777 	pm_runtime_put_noidle(d);
3778 
3779 	return 0;
3780 }
3781 
3782 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3783 {
3784 	struct rtl8169_private *tp = netdev_priv(dev);
3785 
3786 	if (!netif_running(dev))
3787 		return -ENODEV;
3788 
3789 	return phy_mii_ioctl(tp->phydev, ifr, cmd);
3790 }
3791 
3792 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3793 {
3794 	switch (tp->mac_version) {
3795 	case RTL_GIGA_MAC_VER_25:
3796 	case RTL_GIGA_MAC_VER_26:
3797 	case RTL_GIGA_MAC_VER_29:
3798 	case RTL_GIGA_MAC_VER_30:
3799 	case RTL_GIGA_MAC_VER_32:
3800 	case RTL_GIGA_MAC_VER_33:
3801 	case RTL_GIGA_MAC_VER_34:
3802 	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
3803 		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
3804 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3805 		break;
3806 	default:
3807 		break;
3808 	}
3809 }
3810 
3811 static void rtl_pll_power_down(struct rtl8169_private *tp)
3812 {
3813 	if (r8168_check_dash(tp))
3814 		return;
3815 
3816 	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3817 	    tp->mac_version == RTL_GIGA_MAC_VER_33)
3818 		rtl_ephy_write(tp, 0x19, 0xff64);
3819 
3820 	if (device_may_wakeup(tp_to_dev(tp))) {
3821 		phy_speed_down(tp->phydev, false);
3822 		rtl_wol_suspend_quirk(tp);
3823 		return;
3824 	}
3825 
3826 	switch (tp->mac_version) {
3827 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
3828 	case RTL_GIGA_MAC_VER_37:
3829 	case RTL_GIGA_MAC_VER_39:
3830 	case RTL_GIGA_MAC_VER_43:
3831 	case RTL_GIGA_MAC_VER_44:
3832 	case RTL_GIGA_MAC_VER_45:
3833 	case RTL_GIGA_MAC_VER_46:
3834 	case RTL_GIGA_MAC_VER_47:
3835 	case RTL_GIGA_MAC_VER_48:
3836 	case RTL_GIGA_MAC_VER_50:
3837 	case RTL_GIGA_MAC_VER_51:
3838 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
3839 		break;
3840 	case RTL_GIGA_MAC_VER_40:
3841 	case RTL_GIGA_MAC_VER_41:
3842 	case RTL_GIGA_MAC_VER_49:
3843 		rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
3844 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
3845 		break;
3846 	default:
3847 		break;
3848 	}
3849 }
3850 
3851 static void rtl_pll_power_up(struct rtl8169_private *tp)
3852 {
3853 	switch (tp->mac_version) {
3854 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
3855 	case RTL_GIGA_MAC_VER_37:
3856 	case RTL_GIGA_MAC_VER_39:
3857 	case RTL_GIGA_MAC_VER_43:
3858 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
3859 		break;
3860 	case RTL_GIGA_MAC_VER_44:
3861 	case RTL_GIGA_MAC_VER_45:
3862 	case RTL_GIGA_MAC_VER_46:
3863 	case RTL_GIGA_MAC_VER_47:
3864 	case RTL_GIGA_MAC_VER_48:
3865 	case RTL_GIGA_MAC_VER_50:
3866 	case RTL_GIGA_MAC_VER_51:
3867 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
3868 		break;
3869 	case RTL_GIGA_MAC_VER_40:
3870 	case RTL_GIGA_MAC_VER_41:
3871 	case RTL_GIGA_MAC_VER_49:
3872 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
3873 		rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
3874 		break;
3875 	default:
3876 		break;
3877 	}
3878 
3879 	phy_resume(tp->phydev);
3880 	/* give MAC/PHY some time to resume */
3881 	msleep(20);
3882 }
3883 
3884 static void rtl_init_rxcfg(struct rtl8169_private *tp)
3885 {
3886 	switch (tp->mac_version) {
3887 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
3888 	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
3889 		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
3890 		break;
3891 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
3892 	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
3893 	case RTL_GIGA_MAC_VER_38:
3894 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3895 		break;
3896 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
3897 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
3898 		break;
3899 	default:
3900 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
3901 		break;
3902 	}
3903 }
3904 
3905 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3906 {
3907 	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
3908 }
3909 
3910 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
3911 {
3912 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
3913 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
3914 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
3915 }
3916 
3917 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
3918 {
3919 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
3920 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
3921 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
3922 }
3923 
3924 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
3925 {
3926 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
3927 }
3928 
3929 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
3930 {
3931 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
3932 }
3933 
3934 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
3935 {
3936 	RTL_W8(tp, MaxTxPacketSize, 0x3f);
3937 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
3938 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
3939 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
3940 }
3941 
3942 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
3943 {
3944 	RTL_W8(tp, MaxTxPacketSize, 0x0c);
3945 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
3946 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
3947 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
3948 }
3949 
3950 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
3951 {
3952 	rtl_tx_performance_tweak(tp,
3953 		PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
3954 }
3955 
3956 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
3957 {
3958 	rtl_tx_performance_tweak(tp,
3959 		PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
3960 }
3961 
3962 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
3963 {
3964 	r8168b_0_hw_jumbo_enable(tp);
3965 
3966 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
3967 }
3968 
3969 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
3970 {
3971 	r8168b_0_hw_jumbo_disable(tp);
3972 
3973 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
3974 }
3975 
3976 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
3977 {
3978 	rtl_unlock_config_regs(tp);
3979 	switch (tp->mac_version) {
3980 	case RTL_GIGA_MAC_VER_11:
3981 		r8168b_0_hw_jumbo_enable(tp);
3982 		break;
3983 	case RTL_GIGA_MAC_VER_12:
3984 	case RTL_GIGA_MAC_VER_17:
3985 		r8168b_1_hw_jumbo_enable(tp);
3986 		break;
3987 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
3988 		r8168c_hw_jumbo_enable(tp);
3989 		break;
3990 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
3991 		r8168dp_hw_jumbo_enable(tp);
3992 		break;
3993 	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
3994 		r8168e_hw_jumbo_enable(tp);
3995 		break;
3996 	default:
3997 		break;
3998 	}
3999 	rtl_lock_config_regs(tp);
4000 }
4001 
4002 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4003 {
4004 	rtl_unlock_config_regs(tp);
4005 	switch (tp->mac_version) {
4006 	case RTL_GIGA_MAC_VER_11:
4007 		r8168b_0_hw_jumbo_disable(tp);
4008 		break;
4009 	case RTL_GIGA_MAC_VER_12:
4010 	case RTL_GIGA_MAC_VER_17:
4011 		r8168b_1_hw_jumbo_disable(tp);
4012 		break;
4013 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
4014 		r8168c_hw_jumbo_disable(tp);
4015 		break;
4016 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
4017 		r8168dp_hw_jumbo_disable(tp);
4018 		break;
4019 	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
4020 		r8168e_hw_jumbo_disable(tp);
4021 		break;
4022 	default:
4023 		break;
4024 	}
4025 	rtl_lock_config_regs(tp);
4026 }
4027 
4028 DECLARE_RTL_COND(rtl_chipcmd_cond)
4029 {
4030 	return RTL_R8(tp, ChipCmd) & CmdReset;
4031 }
4032 
4033 static void rtl_hw_reset(struct rtl8169_private *tp)
4034 {
4035 	RTL_W8(tp, ChipCmd, CmdReset);
4036 
4037 	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4038 }
4039 
4040 static void rtl_request_firmware(struct rtl8169_private *tp)
4041 {
4042 	struct rtl_fw *rtl_fw;
4043 
4044 	/* firmware loaded already or no firmware available */
4045 	if (tp->rtl_fw || !tp->fw_name)
4046 		return;
4047 
4048 	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4049 	if (!rtl_fw) {
4050 		netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
4051 		return;
4052 	}
4053 
4054 	rtl_fw->phy_write = rtl_writephy;
4055 	rtl_fw->phy_read = rtl_readphy;
4056 	rtl_fw->mac_mcu_write = mac_mcu_write;
4057 	rtl_fw->mac_mcu_read = mac_mcu_read;
4058 	rtl_fw->fw_name = tp->fw_name;
4059 	rtl_fw->dev = tp_to_dev(tp);
4060 
4061 	if (rtl_fw_request_firmware(rtl_fw))
4062 		kfree(rtl_fw);
4063 	else
4064 		tp->rtl_fw = rtl_fw;
4065 }
4066 
4067 static void rtl_rx_close(struct rtl8169_private *tp)
4068 {
4069 	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4070 }
4071 
4072 DECLARE_RTL_COND(rtl_npq_cond)
4073 {
4074 	return RTL_R8(tp, TxPoll) & NPQ;
4075 }
4076 
4077 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4078 {
4079 	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
4080 }
4081 
4082 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4083 {
4084 	/* Disable interrupts */
4085 	rtl8169_irq_mask_and_ack(tp);
4086 
4087 	rtl_rx_close(tp);
4088 
4089 	switch (tp->mac_version) {
4090 	case RTL_GIGA_MAC_VER_27:
4091 	case RTL_GIGA_MAC_VER_28:
4092 	case RTL_GIGA_MAC_VER_31:
4093 		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4094 		break;
4095 	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
4096 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
4097 		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
4098 		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4099 		break;
4100 	default:
4101 		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
4102 		udelay(100);
4103 		break;
4104 	}
4105 
4106 	rtl_hw_reset(tp);
4107 }
4108 
4109 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
4110 {
4111 	u32 val = TX_DMA_BURST << TxDMAShift |
4112 		  InterFrameGap << TxInterFrameGapShift;
4113 
4114 	if (rtl_is_8168evl_up(tp))
4115 		val |= TXCFG_AUTO_FIFO;
4116 
4117 	RTL_W32(tp, TxConfig, val);
4118 }
4119 
4120 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
4121 {
4122 	/* Low hurts. Let's disable the filtering. */
4123 	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
4124 }
4125 
4126 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
4127 {
4128 	/*
4129 	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4130 	 * register to be written before TxDescAddrLow to work.
4131 	 * Switching from MMIO to I/O access fixes the issue as well.
4132 	 */
4133 	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4134 	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4135 	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4136 	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
4137 }
4138 
4139 static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
4140 {
4141 	u32 val;
4142 
4143 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
4144 		val = 0x000fff00;
4145 	else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
4146 		val = 0x00ffff00;
4147 	else
4148 		return;
4149 
4150 	if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
4151 		val |= 0xff;
4152 
4153 	RTL_W32(tp, 0x7c, val);
4154 }
4155 
4156 static void rtl_set_rx_mode(struct net_device *dev)
4157 {
4158 	u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
4159 	/* Multicast hash filter */
4160 	u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
4161 	struct rtl8169_private *tp = netdev_priv(dev);
4162 	u32 tmp;
4163 
4164 	if (dev->flags & IFF_PROMISC) {
4165 		/* Unconditionally log net taps. */
4166 		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4167 		rx_mode |= AcceptAllPhys;
4168 	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
4169 		   dev->flags & IFF_ALLMULTI ||
4170 		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
4171 		/* accept all multicasts */
4172 	} else if (netdev_mc_empty(dev)) {
4173 		rx_mode &= ~AcceptMulticast;
4174 	} else {
4175 		struct netdev_hw_addr *ha;
4176 
4177 		mc_filter[1] = mc_filter[0] = 0;
4178 		netdev_for_each_mc_addr(ha, dev) {
4179 			u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4180 			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
4181 		}
4182 
4183 		if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4184 			tmp = mc_filter[0];
4185 			mc_filter[0] = swab32(mc_filter[1]);
4186 			mc_filter[1] = swab32(tmp);
4187 		}
4188 	}
4189 
4190 	if (dev->features & NETIF_F_RXALL)
4191 		rx_mode |= (AcceptErr | AcceptRunt);
4192 
4193 	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
4194 	RTL_W32(tp, MAR0 + 0, mc_filter[0]);
4195 
4196 	tmp = RTL_R32(tp, RxConfig);
4197 	RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
4198 }
4199 
4200 DECLARE_RTL_COND(rtl_csiar_cond)
4201 {
4202 	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
4203 }
4204 
4205 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4206 {
4207 	u32 func = PCI_FUNC(tp->pci_dev->devfn);
4208 
4209 	RTL_W32(tp, CSIDR, value);
4210 	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4211 		CSIAR_BYTE_ENABLE | func << 16);
4212 
4213 	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4214 }
4215 
4216 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4217 {
4218 	u32 func = PCI_FUNC(tp->pci_dev->devfn);
4219 
4220 	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
4221 		CSIAR_BYTE_ENABLE);
4222 
4223 	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4224 		RTL_R32(tp, CSIDR) : ~0;
4225 }
4226 
4227 static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
4228 {
4229 	struct pci_dev *pdev = tp->pci_dev;
4230 	u32 csi;
4231 
4232 	/* According to Realtek the value at config space address 0x070f
4233 	 * controls the L0s/L1 entrance latency. We try standard ECAM access
4234 	 * first and if it fails fall back to CSI.
4235 	 */
4236 	if (pdev->cfg_size > 0x070f &&
4237 	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
4238 		return;
4239 
4240 	netdev_notice_once(tp->dev,
4241 		"No native access to PCI extended config space, falling back to CSI\n");
4242 	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4243 	rtl_csi_write(tp, 0x070c, csi | val << 24);
4244 }
4245 
4246 static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
4247 {
4248 	rtl_csi_access_enable(tp, 0x27);
4249 }
4250 
4251 struct ephy_info {
4252 	unsigned int offset;
4253 	u16 mask;
4254 	u16 bits;
4255 };
4256 
4257 static void __rtl_ephy_init(struct rtl8169_private *tp,
4258 			    const struct ephy_info *e, int len)
4259 {
4260 	u16 w;
4261 
4262 	while (len-- > 0) {
4263 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4264 		rtl_ephy_write(tp, e->offset, w);
4265 		e++;
4266 	}
4267 }
4268 
4269 #define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
4270 
4271 static void rtl_disable_clock_request(struct rtl8169_private *tp)
4272 {
4273 	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
4274 				   PCI_EXP_LNKCTL_CLKREQ_EN);
4275 }
4276 
4277 static void rtl_enable_clock_request(struct rtl8169_private *tp)
4278 {
4279 	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
4280 				 PCI_EXP_LNKCTL_CLKREQ_EN);
4281 }
4282 
4283 static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
4284 {
4285 	/* work around an issue when PCI reset occurs during L2/L3 state */
4286 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
4287 }
4288 
4289 static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
4290 {
4291 	/* Don't enable ASPM in the chip if OS can't control ASPM */
4292 	if (enable && tp->aspm_manageable) {
4293 		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
4294 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
4295 	} else {
4296 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
4297 		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
4298 	}
4299 
4300 	udelay(10);
4301 }
4302 
4303 static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
4304 			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
4305 {
4306 	/* Usage of dynamic vs. static FIFO is controlled by bit
4307 	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
4308 	 */
4309 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
4310 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
4311 }
4312 
4313 static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
4314 					  u8 low, u8 high)
4315 {
4316 	/* FIFO thresholds for pause flow control */
4317 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
4318 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
4319 }
4320 
4321 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4322 {
4323 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
4324 
4325 	if (tp->dev->mtu <= ETH_DATA_LEN) {
4326 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
4327 					 PCI_EXP_DEVCTL_NOSNOOP_EN);
4328 	}
4329 }
4330 
4331 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4332 {
4333 	rtl_hw_start_8168bb(tp);
4334 
4335 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
4336 }
4337 
4338 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4339 {
4340 	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
4341 
4342 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
4343 
4344 	if (tp->dev->mtu <= ETH_DATA_LEN)
4345 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4346 
4347 	rtl_disable_clock_request(tp);
4348 }
4349 
4350 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4351 {
4352 	static const struct ephy_info e_info_8168cp[] = {
4353 		{ 0x01, 0,	0x0001 },
4354 		{ 0x02, 0x0800,	0x1000 },
4355 		{ 0x03, 0,	0x0042 },
4356 		{ 0x06, 0x0080,	0x0000 },
4357 		{ 0x07, 0,	0x2000 }
4358 	};
4359 
4360 	rtl_set_def_aspm_entry_latency(tp);
4361 
4362 	rtl_ephy_init(tp, e_info_8168cp);
4363 
4364 	__rtl_hw_start_8168cp(tp);
4365 }
4366 
4367 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4368 {
4369 	rtl_set_def_aspm_entry_latency(tp);
4370 
4371 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
4372 
4373 	if (tp->dev->mtu <= ETH_DATA_LEN)
4374 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4375 }
4376 
4377 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4378 {
4379 	rtl_set_def_aspm_entry_latency(tp);
4380 
4381 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
4382 
4383 	/* Magic. */
4384 	RTL_W8(tp, DBG_REG, 0x20);
4385 
4386 	if (tp->dev->mtu <= ETH_DATA_LEN)
4387 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4388 }
4389 
4390 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4391 {
4392 	static const struct ephy_info e_info_8168c_1[] = {
4393 		{ 0x02, 0x0800,	0x1000 },
4394 		{ 0x03, 0,	0x0002 },
4395 		{ 0x06, 0x0080,	0x0000 }
4396 	};
4397 
4398 	rtl_set_def_aspm_entry_latency(tp);
4399 
4400 	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4401 
4402 	rtl_ephy_init(tp, e_info_8168c_1);
4403 
4404 	__rtl_hw_start_8168cp(tp);
4405 }
4406 
4407 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4408 {
4409 	static const struct ephy_info e_info_8168c_2[] = {
4410 		{ 0x01, 0,	0x0001 },
4411 		{ 0x03, 0x0400,	0x0220 }
4412 	};
4413 
4414 	rtl_set_def_aspm_entry_latency(tp);
4415 
4416 	rtl_ephy_init(tp, e_info_8168c_2);
4417 
4418 	__rtl_hw_start_8168cp(tp);
4419 }
4420 
4421 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4422 {
4423 	rtl_hw_start_8168c_2(tp);
4424 }
4425 
4426 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4427 {
4428 	rtl_set_def_aspm_entry_latency(tp);
4429 
4430 	__rtl_hw_start_8168cp(tp);
4431 }
4432 
4433 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4434 {
4435 	rtl_set_def_aspm_entry_latency(tp);
4436 
4437 	rtl_disable_clock_request(tp);
4438 
4439 	if (tp->dev->mtu <= ETH_DATA_LEN)
4440 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4441 }
4442 
4443 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4444 {
4445 	rtl_set_def_aspm_entry_latency(tp);
4446 
4447 	if (tp->dev->mtu <= ETH_DATA_LEN)
4448 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4449 
4450 	rtl_disable_clock_request(tp);
4451 }
4452 
4453 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4454 {
4455 	static const struct ephy_info e_info_8168d_4[] = {
4456 		{ 0x0b, 0x0000,	0x0048 },
4457 		{ 0x19, 0x0020,	0x0050 },
4458 		{ 0x0c, 0x0100,	0x0020 }
4459 	};
4460 
4461 	rtl_set_def_aspm_entry_latency(tp);
4462 
4463 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4464 
4465 	rtl_ephy_init(tp, e_info_8168d_4);
4466 
4467 	rtl_enable_clock_request(tp);
4468 }
4469 
4470 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4471 {
4472 	static const struct ephy_info e_info_8168e_1[] = {
4473 		{ 0x00, 0x0200,	0x0100 },
4474 		{ 0x00, 0x0000,	0x0004 },
4475 		{ 0x06, 0x0002,	0x0001 },
4476 		{ 0x06, 0x0000,	0x0030 },
4477 		{ 0x07, 0x0000,	0x2000 },
4478 		{ 0x00, 0x0000,	0x0020 },
4479 		{ 0x03, 0x5800,	0x2000 },
4480 		{ 0x03, 0x0000,	0x0001 },
4481 		{ 0x01, 0x0800,	0x1000 },
4482 		{ 0x07, 0x0000,	0x4000 },
4483 		{ 0x1e, 0x0000,	0x2000 },
4484 		{ 0x19, 0xffff,	0xfe6c },
4485 		{ 0x0a, 0x0000,	0x0040 }
4486 	};
4487 
4488 	rtl_set_def_aspm_entry_latency(tp);
4489 
4490 	rtl_ephy_init(tp, e_info_8168e_1);
4491 
4492 	if (tp->dev->mtu <= ETH_DATA_LEN)
4493 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4494 
4495 	rtl_disable_clock_request(tp);
4496 
4497 	/* Reset tx FIFO pointer */
4498 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
4499 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
4500 
4501 	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
4502 }
4503 
4504 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4505 {
4506 	static const struct ephy_info e_info_8168e_2[] = {
4507 		{ 0x09, 0x0000,	0x0080 },
4508 		{ 0x19, 0x0000,	0x0224 }
4509 	};
4510 
4511 	rtl_set_def_aspm_entry_latency(tp);
4512 
4513 	rtl_ephy_init(tp, e_info_8168e_2);
4514 
4515 	if (tp->dev->mtu <= ETH_DATA_LEN)
4516 		rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4517 
4518 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
4519 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
4520 	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
4521 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
4522 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
4523 	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
4524 	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
4525 
4526 	rtl_disable_clock_request(tp);
4527 
4528 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
4529 
4530 	rtl8168_config_eee_mac(tp);
4531 
4532 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
4533 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
4534 	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
4535 
4536 	rtl_hw_aspm_clkreq_enable(tp, true);
4537 }
4538 
4539 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4540 {
4541 	rtl_set_def_aspm_entry_latency(tp);
4542 
4543 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4544 
4545 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
4546 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
4547 	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
4548 	rtl_reset_packet_filter(tp);
4549 	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
4550 	rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
4551 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
4552 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
4553 
4554 	rtl_disable_clock_request(tp);
4555 
4556 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
4557 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
4558 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
4559 	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
4560 
4561 	rtl8168_config_eee_mac(tp);
4562 }
4563 
4564 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4565 {
4566 	static const struct ephy_info e_info_8168f_1[] = {
4567 		{ 0x06, 0x00c0,	0x0020 },
4568 		{ 0x08, 0x0001,	0x0002 },
4569 		{ 0x09, 0x0000,	0x0080 },
4570 		{ 0x19, 0x0000,	0x0224 }
4571 	};
4572 
4573 	rtl_hw_start_8168f(tp);
4574 
4575 	rtl_ephy_init(tp, e_info_8168f_1);
4576 
4577 	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
4578 }
4579 
4580 static void rtl_hw_start_8411(struct rtl8169_private *tp)
4581 {
4582 	static const struct ephy_info e_info_8168f_1[] = {
4583 		{ 0x06, 0x00c0,	0x0020 },
4584 		{ 0x0f, 0xffff,	0x5200 },
4585 		{ 0x1e, 0x0000,	0x4000 },
4586 		{ 0x19, 0x0000,	0x0224 }
4587 	};
4588 
4589 	rtl_hw_start_8168f(tp);
4590 	rtl_pcie_state_l2l3_disable(tp);
4591 
4592 	rtl_ephy_init(tp, e_info_8168f_1);
4593 
4594 	rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
4595 }
4596 
4597 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
4598 {
4599 	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
4600 	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
4601 
4602 	rtl_set_def_aspm_entry_latency(tp);
4603 
4604 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4605 
4606 	rtl_reset_packet_filter(tp);
4607 	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
4608 
4609 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
4610 
4611 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
4612 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
4613 
4614 	rtl8168_config_eee_mac(tp);
4615 
4616 	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
4617 	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
4618 
4619 	rtl_pcie_state_l2l3_disable(tp);
4620 }
4621 
4622 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
4623 {
4624 	static const struct ephy_info e_info_8168g_1[] = {
4625 		{ 0x00, 0x0000,	0x0008 },
4626 		{ 0x0c, 0x37d0,	0x0820 },
4627 		{ 0x1e, 0x0000,	0x0001 },
4628 		{ 0x19, 0x8000,	0x0000 }
4629 	};
4630 
4631 	rtl_hw_start_8168g(tp);
4632 
4633 	/* disable aspm and clock request before access ephy */
4634 	rtl_hw_aspm_clkreq_enable(tp, false);
4635 	rtl_ephy_init(tp, e_info_8168g_1);
4636 	rtl_hw_aspm_clkreq_enable(tp, true);
4637 }
4638 
4639 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
4640 {
4641 	static const struct ephy_info e_info_8168g_2[] = {
4642 		{ 0x00, 0x0000,	0x0008 },
4643 		{ 0x0c, 0x3df0,	0x0200 },
4644 		{ 0x19, 0xffff,	0xfc00 },
4645 		{ 0x1e, 0xffff,	0x20eb }
4646 	};
4647 
4648 	rtl_hw_start_8168g(tp);
4649 
4650 	/* disable aspm and clock request before access ephy */
4651 	RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
4652 	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
4653 	rtl_ephy_init(tp, e_info_8168g_2);
4654 }
4655 
4656 static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
4657 {
4658 	static const struct ephy_info e_info_8411_2[] = {
4659 		{ 0x00, 0x0000,	0x0008 },
4660 		{ 0x0c, 0x3df0,	0x0200 },
4661 		{ 0x0f, 0xffff,	0x5200 },
4662 		{ 0x19, 0x0020,	0x0000 },
4663 		{ 0x1e, 0x0000,	0x2000 }
4664 	};
4665 
4666 	rtl_hw_start_8168g(tp);
4667 
4668 	/* disable aspm and clock request before access ephy */
4669 	rtl_hw_aspm_clkreq_enable(tp, false);
4670 	rtl_ephy_init(tp, e_info_8411_2);
4671 
4672 	/* The following Realtek-provided magic fixes an issue with the RX unit
4673 	 * getting confused after the PHY having been powered-down.
4674 	 */
4675 	r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
4676 	r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
4677 	r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
4678 	r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
4679 	r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
4680 	r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
4681 	r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
4682 	r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
4683 	mdelay(3);
4684 	r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
4685 
4686 	r8168_mac_ocp_write(tp, 0xF800, 0xE008);
4687 	r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
4688 	r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
4689 	r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
4690 	r8168_mac_ocp_write(tp, 0xF808, 0xE027);
4691 	r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
4692 	r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
4693 	r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
4694 	r8168_mac_ocp_write(tp, 0xF810, 0xC602);
4695 	r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
4696 	r8168_mac_ocp_write(tp, 0xF814, 0x0000);
4697 	r8168_mac_ocp_write(tp, 0xF816, 0xC502);
4698 	r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
4699 	r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
4700 	r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
4701 	r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
4702 	r8168_mac_ocp_write(tp, 0xF820, 0x080A);
4703 	r8168_mac_ocp_write(tp, 0xF822, 0x6420);
4704 	r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
4705 	r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
4706 	r8168_mac_ocp_write(tp, 0xF828, 0xC516);
4707 	r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
4708 	r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
4709 	r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
4710 	r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
4711 	r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
4712 	r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
4713 	r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
4714 	r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
4715 	r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
4716 	r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
4717 	r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
4718 	r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
4719 	r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
4720 	r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
4721 	r8168_mac_ocp_write(tp, 0xF846, 0xC404);
4722 	r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
4723 	r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
4724 	r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
4725 	r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
4726 	r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
4727 	r8168_mac_ocp_write(tp, 0xF852, 0xE434);
4728 	r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
4729 	r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
4730 	r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
4731 	r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
4732 	r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
4733 	r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
4734 	r8168_mac_ocp_write(tp, 0xF860, 0xF007);
4735 	r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
4736 	r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
4737 	r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
4738 	r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
4739 	r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
4740 	r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
4741 	r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
4742 	r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
4743 	r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
4744 	r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
4745 	r8168_mac_ocp_write(tp, 0xF876, 0xC516);
4746 	r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
4747 	r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
4748 	r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
4749 	r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
4750 	r8168_mac_ocp_write(tp, 0xF880, 0xC512);
4751 	r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
4752 	r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
4753 	r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
4754 	r8168_mac_ocp_write(tp, 0xF888, 0x483F);
4755 	r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
4756 	r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
4757 	r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
4758 	r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
4759 	r8168_mac_ocp_write(tp, 0xF892, 0xC505);
4760 	r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
4761 	r8168_mac_ocp_write(tp, 0xF896, 0xC502);
4762 	r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
4763 	r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
4764 	r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
4765 	r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
4766 	r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
4767 	r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
4768 	r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
4769 	r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
4770 	r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
4771 	r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
4772 	r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
4773 	r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
4774 	r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
4775 	r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
4776 	r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
4777 	r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
4778 	r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
4779 	r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
4780 	r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
4781 	r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
4782 	r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
4783 	r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
4784 	r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
4785 	r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
4786 	r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
4787 	r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
4788 	r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
4789 	r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
4790 	r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
4791 	r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
4792 	r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
4793 	r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
4794 	r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
4795 	r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
4796 	r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
4797 
4798 	r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
4799 
4800 	r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
4801 	r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
4802 	r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
4803 	r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
4804 	r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
4805 	r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
4806 	r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
4807 
4808 	rtl_hw_aspm_clkreq_enable(tp, true);
4809 }
4810 
4811 static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
4812 {
4813 	int rg_saw_cnt;
4814 	u32 data;
4815 	static const struct ephy_info e_info_8168h_1[] = {
4816 		{ 0x1e, 0x0800,	0x0001 },
4817 		{ 0x1d, 0x0000,	0x0800 },
4818 		{ 0x05, 0xffff,	0x2089 },
4819 		{ 0x06, 0xffff,	0x5881 },
4820 		{ 0x04, 0xffff,	0x154a },
4821 		{ 0x01, 0xffff,	0x068b }
4822 	};
4823 
4824 	/* disable aspm and clock request before access ephy */
4825 	rtl_hw_aspm_clkreq_enable(tp, false);
4826 	rtl_ephy_init(tp, e_info_8168h_1);
4827 
4828 	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
4829 	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
4830 
4831 	rtl_set_def_aspm_entry_latency(tp);
4832 
4833 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4834 
4835 	rtl_reset_packet_filter(tp);
4836 
4837 	rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
4838 
4839 	rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
4840 
4841 	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
4842 
4843 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
4844 
4845 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
4846 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
4847 
4848 	rtl8168_config_eee_mac(tp);
4849 
4850 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
4851 	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
4852 
4853 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
4854 
4855 	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
4856 
4857 	rtl_pcie_state_l2l3_disable(tp);
4858 
4859 	rtl_writephy(tp, 0x1f, 0x0c42);
4860 	rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
4861 	rtl_writephy(tp, 0x1f, 0x0000);
4862 	if (rg_saw_cnt > 0) {
4863 		u16 sw_cnt_1ms_ini;
4864 
4865 		sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
4866 		sw_cnt_1ms_ini &= 0x0fff;
4867 		data = r8168_mac_ocp_read(tp, 0xd412);
4868 		data &= ~0x0fff;
4869 		data |= sw_cnt_1ms_ini;
4870 		r8168_mac_ocp_write(tp, 0xd412, data);
4871 	}
4872 
4873 	data = r8168_mac_ocp_read(tp, 0xe056);
4874 	data &= ~0xf0;
4875 	data |= 0x70;
4876 	r8168_mac_ocp_write(tp, 0xe056, data);
4877 
4878 	data = r8168_mac_ocp_read(tp, 0xe052);
4879 	data &= ~0x6000;
4880 	data |= 0x8008;
4881 	r8168_mac_ocp_write(tp, 0xe052, data);
4882 
4883 	data = r8168_mac_ocp_read(tp, 0xe0d6);
4884 	data &= ~0x01ff;
4885 	data |= 0x017f;
4886 	r8168_mac_ocp_write(tp, 0xe0d6, data);
4887 
4888 	data = r8168_mac_ocp_read(tp, 0xd420);
4889 	data &= ~0x0fff;
4890 	data |= 0x047f;
4891 	r8168_mac_ocp_write(tp, 0xd420, data);
4892 
4893 	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
4894 	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
4895 	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
4896 	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
4897 
4898 	rtl_hw_aspm_clkreq_enable(tp, true);
4899 }
4900 
4901 static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
4902 {
4903 	rtl8168ep_stop_cmac(tp);
4904 
4905 	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
4906 	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
4907 
4908 	rtl_set_def_aspm_entry_latency(tp);
4909 
4910 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
4911 
4912 	rtl_reset_packet_filter(tp);
4913 
4914 	rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
4915 
4916 	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
4917 
4918 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
4919 
4920 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
4921 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
4922 
4923 	rtl8168_config_eee_mac(tp);
4924 
4925 	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
4926 
4927 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
4928 
4929 	rtl_pcie_state_l2l3_disable(tp);
4930 }
4931 
4932 static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
4933 {
4934 	static const struct ephy_info e_info_8168ep_1[] = {
4935 		{ 0x00, 0xffff,	0x10ab },
4936 		{ 0x06, 0xffff,	0xf030 },
4937 		{ 0x08, 0xffff,	0x2006 },
4938 		{ 0x0d, 0xffff,	0x1666 },
4939 		{ 0x0c, 0x3ff0,	0x0000 }
4940 	};
4941 
4942 	/* disable aspm and clock request before access ephy */
4943 	rtl_hw_aspm_clkreq_enable(tp, false);
4944 	rtl_ephy_init(tp, e_info_8168ep_1);
4945 
4946 	rtl_hw_start_8168ep(tp);
4947 
4948 	rtl_hw_aspm_clkreq_enable(tp, true);
4949 }
4950 
4951 static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
4952 {
4953 	static const struct ephy_info e_info_8168ep_2[] = {
4954 		{ 0x00, 0xffff,	0x10a3 },
4955 		{ 0x19, 0xffff,	0xfc00 },
4956 		{ 0x1e, 0xffff,	0x20ea }
4957 	};
4958 
4959 	/* disable aspm and clock request before access ephy */
4960 	rtl_hw_aspm_clkreq_enable(tp, false);
4961 	rtl_ephy_init(tp, e_info_8168ep_2);
4962 
4963 	rtl_hw_start_8168ep(tp);
4964 
4965 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
4966 	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
4967 
4968 	rtl_hw_aspm_clkreq_enable(tp, true);
4969 }
4970 
4971 static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
4972 {
4973 	u32 data;
4974 	static const struct ephy_info e_info_8168ep_3[] = {
4975 		{ 0x00, 0xffff,	0x10a3 },
4976 		{ 0x19, 0xffff,	0x7c00 },
4977 		{ 0x1e, 0xffff,	0x20eb },
4978 		{ 0x0d, 0xffff,	0x1666 }
4979 	};
4980 
4981 	/* disable aspm and clock request before access ephy */
4982 	rtl_hw_aspm_clkreq_enable(tp, false);
4983 	rtl_ephy_init(tp, e_info_8168ep_3);
4984 
4985 	rtl_hw_start_8168ep(tp);
4986 
4987 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
4988 	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
4989 
4990 	data = r8168_mac_ocp_read(tp, 0xd3e2);
4991 	data &= 0xf000;
4992 	data |= 0x0271;
4993 	r8168_mac_ocp_write(tp, 0xd3e2, data);
4994 
4995 	data = r8168_mac_ocp_read(tp, 0xd3e4);
4996 	data &= 0xff00;
4997 	r8168_mac_ocp_write(tp, 0xd3e4, data);
4998 
4999 	data = r8168_mac_ocp_read(tp, 0xe860);
5000 	data |= 0x0080;
5001 	r8168_mac_ocp_write(tp, 0xe860, data);
5002 
5003 	rtl_hw_aspm_clkreq_enable(tp, true);
5004 }
5005 
5006 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5007 {
5008 	static const struct ephy_info e_info_8102e_1[] = {
5009 		{ 0x01,	0, 0x6e65 },
5010 		{ 0x02,	0, 0x091f },
5011 		{ 0x03,	0, 0xc2f9 },
5012 		{ 0x06,	0, 0xafb5 },
5013 		{ 0x07,	0, 0x0e00 },
5014 		{ 0x19,	0, 0xec80 },
5015 		{ 0x01,	0, 0x2e65 },
5016 		{ 0x01,	0, 0x6e65 }
5017 	};
5018 	u8 cfg1;
5019 
5020 	rtl_set_def_aspm_entry_latency(tp);
5021 
5022 	RTL_W8(tp, DBG_REG, FIX_NAK_1);
5023 
5024 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5025 
5026 	RTL_W8(tp, Config1,
5027 	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5028 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
5029 
5030 	cfg1 = RTL_R8(tp, Config1);
5031 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5032 		RTL_W8(tp, Config1, cfg1 & ~LEDS0);
5033 
5034 	rtl_ephy_init(tp, e_info_8102e_1);
5035 }
5036 
5037 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5038 {
5039 	rtl_set_def_aspm_entry_latency(tp);
5040 
5041 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5042 
5043 	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
5044 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
5045 }
5046 
5047 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5048 {
5049 	rtl_hw_start_8102e_2(tp);
5050 
5051 	rtl_ephy_write(tp, 0x03, 0xc2f9);
5052 }
5053 
5054 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5055 {
5056 	static const struct ephy_info e_info_8105e_1[] = {
5057 		{ 0x07,	0, 0x4000 },
5058 		{ 0x19,	0, 0x0200 },
5059 		{ 0x19,	0, 0x0020 },
5060 		{ 0x1e,	0, 0x2000 },
5061 		{ 0x03,	0, 0x0001 },
5062 		{ 0x19,	0, 0x0100 },
5063 		{ 0x19,	0, 0x0004 },
5064 		{ 0x0a,	0, 0x0020 }
5065 	};
5066 
5067 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5068 	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5069 
5070 	/* Disable Early Tally Counter */
5071 	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
5072 
5073 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
5074 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
5075 
5076 	rtl_ephy_init(tp, e_info_8105e_1);
5077 
5078 	rtl_pcie_state_l2l3_disable(tp);
5079 }
5080 
5081 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5082 {
5083 	rtl_hw_start_8105e_1(tp);
5084 	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
5085 }
5086 
5087 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5088 {
5089 	static const struct ephy_info e_info_8402[] = {
5090 		{ 0x19,	0xffff, 0xff64 },
5091 		{ 0x1e,	0, 0x4000 }
5092 	};
5093 
5094 	rtl_set_def_aspm_entry_latency(tp);
5095 
5096 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5097 	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5098 
5099 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5100 
5101 	rtl_ephy_init(tp, e_info_8402);
5102 
5103 	rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5104 
5105 	rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
5106 	rtl_reset_packet_filter(tp);
5107 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
5108 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
5109 	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
5110 
5111 	rtl_pcie_state_l2l3_disable(tp);
5112 }
5113 
5114 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5115 {
5116 	rtl_hw_aspm_clkreq_enable(tp, false);
5117 
5118 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5119 	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5120 
5121 	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5122 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
5123 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
5124 
5125 	rtl_pcie_state_l2l3_disable(tp);
5126 	rtl_hw_aspm_clkreq_enable(tp, true);
5127 }
5128 
5129 static void rtl_hw_config(struct rtl8169_private *tp)
5130 {
5131 	static const rtl_generic_fct hw_configs[] = {
5132 		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
5133 		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
5134 		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
5135 		[RTL_GIGA_MAC_VER_10] = NULL,
5136 		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
5137 		[RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
5138 		[RTL_GIGA_MAC_VER_13] = NULL,
5139 		[RTL_GIGA_MAC_VER_14] = NULL,
5140 		[RTL_GIGA_MAC_VER_15] = NULL,
5141 		[RTL_GIGA_MAC_VER_16] = NULL,
5142 		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
5143 		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
5144 		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
5145 		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
5146 		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
5147 		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
5148 		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
5149 		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
5150 		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
5151 		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
5152 		[RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
5153 		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
5154 		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
5155 		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
5156 		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
5157 		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
5158 		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
5159 		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
5160 		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
5161 		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
5162 		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
5163 		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
5164 		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
5165 		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
5166 		[RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
5167 		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
5168 		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
5169 		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
5170 		[RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
5171 		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
5172 		[RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
5173 		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
5174 		[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
5175 		[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
5176 		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
5177 	};
5178 
5179 	if (hw_configs[tp->mac_version])
5180 		hw_configs[tp->mac_version](tp);
5181 }
5182 
5183 static void rtl_hw_start_8168(struct rtl8169_private *tp)
5184 {
5185 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5186 	    tp->mac_version == RTL_GIGA_MAC_VER_16)
5187 		pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
5188 					 PCI_EXP_DEVCTL_NOSNOOP_EN);
5189 
5190 	if (rtl_is_8168evl_up(tp))
5191 		RTL_W8(tp, MaxTxPacketSize, EarlySize);
5192 	else
5193 		RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
5194 
5195 	rtl_hw_config(tp);
5196 }
5197 
5198 static void rtl_hw_start_8169(struct rtl8169_private *tp)
5199 {
5200 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5201 		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
5202 
5203 	RTL_W8(tp, EarlyTxThres, NoEarlyTx);
5204 
5205 	tp->cp_cmd |= PCIMulRW;
5206 
5207 	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
5208 	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
5209 		netif_dbg(tp, drv, tp->dev,
5210 			  "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
5211 		tp->cp_cmd |= (1 << 14);
5212 	}
5213 
5214 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5215 
5216 	rtl8169_set_magic_reg(tp, tp->mac_version);
5217 
5218 	RTL_W32(tp, RxMissed, 0);
5219 }
5220 
5221 static void rtl_hw_start(struct  rtl8169_private *tp)
5222 {
5223 	rtl_unlock_config_regs(tp);
5224 
5225 	tp->cp_cmd &= CPCMD_MASK;
5226 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5227 
5228 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
5229 		rtl_hw_start_8169(tp);
5230 	else
5231 		rtl_hw_start_8168(tp);
5232 
5233 	rtl_set_rx_max_size(tp);
5234 	rtl_set_rx_tx_desc_registers(tp);
5235 	rtl_lock_config_regs(tp);
5236 
5237 	/* disable interrupt coalescing */
5238 	RTL_W16(tp, IntrMitigate, 0x0000);
5239 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
5240 	RTL_R8(tp, IntrMask);
5241 	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
5242 	rtl_init_rxcfg(tp);
5243 	rtl_set_tx_config_registers(tp);
5244 
5245 	rtl_set_rx_mode(tp->dev);
5246 	/* no early-rx interrupts */
5247 	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
5248 	rtl_irq_enable(tp);
5249 }
5250 
5251 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5252 {
5253 	struct rtl8169_private *tp = netdev_priv(dev);
5254 
5255 	if (new_mtu > ETH_DATA_LEN)
5256 		rtl_hw_jumbo_enable(tp);
5257 	else
5258 		rtl_hw_jumbo_disable(tp);
5259 
5260 	dev->mtu = new_mtu;
5261 	netdev_update_features(dev);
5262 
5263 	return 0;
5264 }
5265 
5266 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5267 {
5268 	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5269 	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
5270 }
5271 
5272 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5273 				     void **data_buff, struct RxDesc *desc)
5274 {
5275 	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
5276 			 R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
5277 
5278 	kfree(*data_buff);
5279 	*data_buff = NULL;
5280 	rtl8169_make_unusable_by_asic(desc);
5281 }
5282 
5283 static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
5284 {
5285 	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5286 
5287 	/* Force memory writes to complete before releasing descriptor */
5288 	dma_wmb();
5289 
5290 	desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
5291 }
5292 
5293 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5294 					     struct RxDesc *desc)
5295 {
5296 	void *data;
5297 	dma_addr_t mapping;
5298 	struct device *d = tp_to_dev(tp);
5299 	int node = dev_to_node(d);
5300 
5301 	data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
5302 	if (!data)
5303 		return NULL;
5304 
5305 	/* Memory should be properly aligned, but better check. */
5306 	if (!IS_ALIGNED((unsigned long)data, 8)) {
5307 		netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
5308 		goto err_out;
5309 	}
5310 
5311 	mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
5312 	if (unlikely(dma_mapping_error(d, mapping))) {
5313 		if (net_ratelimit())
5314 			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5315 		goto err_out;
5316 	}
5317 
5318 	desc->addr = cpu_to_le64(mapping);
5319 	rtl8169_mark_to_asic(desc);
5320 	return data;
5321 
5322 err_out:
5323 	kfree(data);
5324 	return NULL;
5325 }
5326 
5327 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5328 {
5329 	unsigned int i;
5330 
5331 	for (i = 0; i < NUM_RX_DESC; i++) {
5332 		if (tp->Rx_databuff[i]) {
5333 			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5334 					    tp->RxDescArray + i);
5335 		}
5336 	}
5337 }
5338 
5339 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5340 {
5341 	desc->opts1 |= cpu_to_le32(RingEnd);
5342 }
5343 
5344 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5345 {
5346 	unsigned int i;
5347 
5348 	for (i = 0; i < NUM_RX_DESC; i++) {
5349 		void *data;
5350 
5351 		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5352 		if (!data) {
5353 			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5354 			goto err_out;
5355 		}
5356 		tp->Rx_databuff[i] = data;
5357 	}
5358 
5359 	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5360 	return 0;
5361 
5362 err_out:
5363 	rtl8169_rx_clear(tp);
5364 	return -ENOMEM;
5365 }
5366 
5367 static int rtl8169_init_ring(struct rtl8169_private *tp)
5368 {
5369 	rtl8169_init_ring_indexes(tp);
5370 
5371 	memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
5372 	memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
5373 
5374 	return rtl8169_rx_fill(tp);
5375 }
5376 
5377 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5378 				 struct TxDesc *desc)
5379 {
5380 	unsigned int len = tx_skb->len;
5381 
5382 	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
5383 
5384 	desc->opts1 = 0x00;
5385 	desc->opts2 = 0x00;
5386 	desc->addr = 0x00;
5387 	tx_skb->len = 0;
5388 }
5389 
5390 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5391 				   unsigned int n)
5392 {
5393 	unsigned int i;
5394 
5395 	for (i = 0; i < n; i++) {
5396 		unsigned int entry = (start + i) % NUM_TX_DESC;
5397 		struct ring_info *tx_skb = tp->tx_skb + entry;
5398 		unsigned int len = tx_skb->len;
5399 
5400 		if (len) {
5401 			struct sk_buff *skb = tx_skb->skb;
5402 
5403 			rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
5404 					     tp->TxDescArray + entry);
5405 			if (skb) {
5406 				dev_consume_skb_any(skb);
5407 				tx_skb->skb = NULL;
5408 			}
5409 		}
5410 	}
5411 }
5412 
5413 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5414 {
5415 	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5416 	tp->cur_tx = tp->dirty_tx = 0;
5417 	netdev_reset_queue(tp->dev);
5418 }
5419 
5420 static void rtl_reset_work(struct rtl8169_private *tp)
5421 {
5422 	struct net_device *dev = tp->dev;
5423 	int i;
5424 
5425 	napi_disable(&tp->napi);
5426 	netif_stop_queue(dev);
5427 	synchronize_rcu();
5428 
5429 	rtl8169_hw_reset(tp);
5430 
5431 	for (i = 0; i < NUM_RX_DESC; i++)
5432 		rtl8169_mark_to_asic(tp->RxDescArray + i);
5433 
5434 	rtl8169_tx_clear(tp);
5435 	rtl8169_init_ring_indexes(tp);
5436 
5437 	napi_enable(&tp->napi);
5438 	rtl_hw_start(tp);
5439 	netif_wake_queue(dev);
5440 }
5441 
5442 static void rtl8169_tx_timeout(struct net_device *dev)
5443 {
5444 	struct rtl8169_private *tp = netdev_priv(dev);
5445 
5446 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5447 }
5448 
5449 static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
5450 {
5451 	u32 status = opts0 | len;
5452 
5453 	if (entry == NUM_TX_DESC - 1)
5454 		status |= RingEnd;
5455 
5456 	return cpu_to_le32(status);
5457 }
5458 
5459 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5460 			      u32 *opts)
5461 {
5462 	struct skb_shared_info *info = skb_shinfo(skb);
5463 	unsigned int cur_frag, entry;
5464 	struct TxDesc *uninitialized_var(txd);
5465 	struct device *d = tp_to_dev(tp);
5466 
5467 	entry = tp->cur_tx;
5468 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5469 		const skb_frag_t *frag = info->frags + cur_frag;
5470 		dma_addr_t mapping;
5471 		u32 len;
5472 		void *addr;
5473 
5474 		entry = (entry + 1) % NUM_TX_DESC;
5475 
5476 		txd = tp->TxDescArray + entry;
5477 		len = skb_frag_size(frag);
5478 		addr = skb_frag_address(frag);
5479 		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5480 		if (unlikely(dma_mapping_error(d, mapping))) {
5481 			if (net_ratelimit())
5482 				netif_err(tp, drv, tp->dev,
5483 					  "Failed to map TX fragments DMA!\n");
5484 			goto err_out;
5485 		}
5486 
5487 		txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
5488 		txd->opts2 = cpu_to_le32(opts[1]);
5489 		txd->addr = cpu_to_le64(mapping);
5490 
5491 		tp->tx_skb[entry].len = len;
5492 	}
5493 
5494 	if (cur_frag) {
5495 		tp->tx_skb[entry].skb = skb;
5496 		txd->opts1 |= cpu_to_le32(LastFrag);
5497 	}
5498 
5499 	return cur_frag;
5500 
5501 err_out:
5502 	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
5503 	return -EIO;
5504 }
5505 
5506 static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
5507 {
5508 	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
5509 }
5510 
5511 /* msdn_giant_send_check()
5512  * According to the document of microsoft, the TCP Pseudo Header excludes the
5513  * packet length for IPv6 TCP large packets.
5514  */
5515 static int msdn_giant_send_check(struct sk_buff *skb)
5516 {
5517 	const struct ipv6hdr *ipv6h;
5518 	struct tcphdr *th;
5519 	int ret;
5520 
5521 	ret = skb_cow_head(skb, 0);
5522 	if (ret)
5523 		return ret;
5524 
5525 	ipv6h = ipv6_hdr(skb);
5526 	th = tcp_hdr(skb);
5527 
5528 	th->check = 0;
5529 	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
5530 
5531 	return ret;
5532 }
5533 
5534 static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
5535 {
5536 	u32 mss = skb_shinfo(skb)->gso_size;
5537 
5538 	if (mss) {
5539 		opts[0] |= TD_LSO;
5540 		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
5541 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5542 		const struct iphdr *ip = ip_hdr(skb);
5543 
5544 		if (ip->protocol == IPPROTO_TCP)
5545 			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
5546 		else if (ip->protocol == IPPROTO_UDP)
5547 			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
5548 		else
5549 			WARN_ON_ONCE(1);
5550 	}
5551 }
5552 
5553 static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
5554 				struct sk_buff *skb, u32 *opts)
5555 {
5556 	u32 transport_offset = (u32)skb_transport_offset(skb);
5557 	u32 mss = skb_shinfo(skb)->gso_size;
5558 
5559 	if (mss) {
5560 		switch (vlan_get_protocol(skb)) {
5561 		case htons(ETH_P_IP):
5562 			opts[0] |= TD1_GTSENV4;
5563 			break;
5564 
5565 		case htons(ETH_P_IPV6):
5566 			if (msdn_giant_send_check(skb))
5567 				return false;
5568 
5569 			opts[0] |= TD1_GTSENV6;
5570 			break;
5571 
5572 		default:
5573 			WARN_ON_ONCE(1);
5574 			break;
5575 		}
5576 
5577 		opts[0] |= transport_offset << GTTCPHO_SHIFT;
5578 		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
5579 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5580 		u8 ip_protocol;
5581 
5582 		switch (vlan_get_protocol(skb)) {
5583 		case htons(ETH_P_IP):
5584 			opts[1] |= TD1_IPv4_CS;
5585 			ip_protocol = ip_hdr(skb)->protocol;
5586 			break;
5587 
5588 		case htons(ETH_P_IPV6):
5589 			opts[1] |= TD1_IPv6_CS;
5590 			ip_protocol = ipv6_hdr(skb)->nexthdr;
5591 			break;
5592 
5593 		default:
5594 			ip_protocol = IPPROTO_RAW;
5595 			break;
5596 		}
5597 
5598 		if (ip_protocol == IPPROTO_TCP)
5599 			opts[1] |= TD1_TCP_CS;
5600 		else if (ip_protocol == IPPROTO_UDP)
5601 			opts[1] |= TD1_UDP_CS;
5602 		else
5603 			WARN_ON_ONCE(1);
5604 
5605 		opts[1] |= transport_offset << TCPHO_SHIFT;
5606 	} else {
5607 		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
5608 			return !eth_skb_pad(skb);
5609 	}
5610 
5611 	return true;
5612 }
5613 
5614 static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
5615 			       unsigned int nr_frags)
5616 {
5617 	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
5618 
5619 	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
5620 	return slots_avail > nr_frags;
5621 }
5622 
5623 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
5624 static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
5625 {
5626 	switch (tp->mac_version) {
5627 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5628 	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
5629 		return false;
5630 	default:
5631 		return true;
5632 	}
5633 }
5634 
5635 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5636 				      struct net_device *dev)
5637 {
5638 	struct rtl8169_private *tp = netdev_priv(dev);
5639 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5640 	struct TxDesc *txd = tp->TxDescArray + entry;
5641 	struct device *d = tp_to_dev(tp);
5642 	dma_addr_t mapping;
5643 	u32 opts[2], len;
5644 	bool stop_queue;
5645 	bool door_bell;
5646 	int frags;
5647 
5648 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
5649 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5650 		goto err_stop_0;
5651 	}
5652 
5653 	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5654 		goto err_stop_0;
5655 
5656 	opts[1] = rtl8169_tx_vlan_tag(skb);
5657 	opts[0] = DescOwn;
5658 
5659 	if (rtl_chip_supports_csum_v2(tp)) {
5660 		if (!rtl8169_tso_csum_v2(tp, skb, opts))
5661 			goto err_dma_0;
5662 	} else {
5663 		rtl8169_tso_csum_v1(skb, opts);
5664 	}
5665 
5666 	len = skb_headlen(skb);
5667 	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5668 	if (unlikely(dma_mapping_error(d, mapping))) {
5669 		if (net_ratelimit())
5670 			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5671 		goto err_dma_0;
5672 	}
5673 
5674 	tp->tx_skb[entry].len = len;
5675 	txd->addr = cpu_to_le64(mapping);
5676 
5677 	frags = rtl8169_xmit_frags(tp, skb, opts);
5678 	if (frags < 0)
5679 		goto err_dma_1;
5680 	else if (frags)
5681 		opts[0] |= FirstFrag;
5682 	else {
5683 		opts[0] |= FirstFrag | LastFrag;
5684 		tp->tx_skb[entry].skb = skb;
5685 	}
5686 
5687 	txd->opts2 = cpu_to_le32(opts[1]);
5688 
5689 	skb_tx_timestamp(skb);
5690 
5691 	/* Force memory writes to complete before releasing descriptor */
5692 	dma_wmb();
5693 
5694 	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
5695 
5696 	txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
5697 
5698 	/* Force all memory writes to complete before notifying device */
5699 	wmb();
5700 
5701 	tp->cur_tx += frags + 1;
5702 
5703 	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
5704 	if (unlikely(stop_queue)) {
5705 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5706 		 * not miss a ring update when it notices a stopped queue.
5707 		 */
5708 		smp_wmb();
5709 		netif_stop_queue(dev);
5710 	}
5711 
5712 	if (door_bell)
5713 		RTL_W8(tp, TxPoll, NPQ);
5714 
5715 	if (unlikely(stop_queue)) {
5716 		/* Sync with rtl_tx:
5717 		 * - publish queue status and cur_tx ring index (write barrier)
5718 		 * - refresh dirty_tx ring index (read barrier).
5719 		 * May the current thread have a pessimistic view of the ring
5720 		 * status and forget to wake up queue, a racing rtl_tx thread
5721 		 * can't.
5722 		 */
5723 		smp_mb();
5724 		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
5725 			netif_start_queue(dev);
5726 	}
5727 
5728 	return NETDEV_TX_OK;
5729 
5730 err_dma_1:
5731 	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5732 err_dma_0:
5733 	dev_kfree_skb_any(skb);
5734 	dev->stats.tx_dropped++;
5735 	return NETDEV_TX_OK;
5736 
5737 err_stop_0:
5738 	netif_stop_queue(dev);
5739 	dev->stats.tx_dropped++;
5740 	return NETDEV_TX_BUSY;
5741 }
5742 
5743 static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
5744 						struct net_device *dev,
5745 						netdev_features_t features)
5746 {
5747 	int transport_offset = skb_transport_offset(skb);
5748 	struct rtl8169_private *tp = netdev_priv(dev);
5749 
5750 	if (skb_is_gso(skb)) {
5751 		if (transport_offset > GTTCPHO_MAX &&
5752 		    rtl_chip_supports_csum_v2(tp))
5753 			features &= ~NETIF_F_ALL_TSO;
5754 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5755 		if (skb->len < ETH_ZLEN) {
5756 			switch (tp->mac_version) {
5757 			case RTL_GIGA_MAC_VER_11:
5758 			case RTL_GIGA_MAC_VER_12:
5759 			case RTL_GIGA_MAC_VER_17:
5760 			case RTL_GIGA_MAC_VER_34:
5761 				features &= ~NETIF_F_CSUM_MASK;
5762 				break;
5763 			default:
5764 				break;
5765 			}
5766 		}
5767 
5768 		if (transport_offset > TCPHO_MAX &&
5769 		    rtl_chip_supports_csum_v2(tp))
5770 			features &= ~NETIF_F_CSUM_MASK;
5771 	}
5772 
5773 	return vlan_features_check(skb, features);
5774 }
5775 
5776 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5777 {
5778 	struct rtl8169_private *tp = netdev_priv(dev);
5779 	struct pci_dev *pdev = tp->pci_dev;
5780 	u16 pci_status, pci_cmd;
5781 
5782 	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5783 	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5784 
5785 	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5786 		  pci_cmd, pci_status);
5787 
5788 	/*
5789 	 * The recovery sequence below admits a very elaborated explanation:
5790 	 * - it seems to work;
5791 	 * - I did not see what else could be done;
5792 	 * - it makes iop3xx happy.
5793 	 *
5794 	 * Feel free to adjust to your needs.
5795 	 */
5796 	if (pdev->broken_parity_status)
5797 		pci_cmd &= ~PCI_COMMAND_PARITY;
5798 	else
5799 		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5800 
5801 	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
5802 
5803 	pci_write_config_word(pdev, PCI_STATUS,
5804 		pci_status & (PCI_STATUS_DETECTED_PARITY |
5805 		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5806 		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5807 
5808 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
5809 }
5810 
5811 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
5812 		   int budget)
5813 {
5814 	unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
5815 
5816 	dirty_tx = tp->dirty_tx;
5817 	smp_rmb();
5818 	tx_left = tp->cur_tx - dirty_tx;
5819 
5820 	while (tx_left > 0) {
5821 		unsigned int entry = dirty_tx % NUM_TX_DESC;
5822 		struct ring_info *tx_skb = tp->tx_skb + entry;
5823 		u32 status;
5824 
5825 		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
5826 		if (status & DescOwn)
5827 			break;
5828 
5829 		/* This barrier is needed to keep us from reading
5830 		 * any other fields out of the Tx descriptor until
5831 		 * we know the status of DescOwn
5832 		 */
5833 		dma_rmb();
5834 
5835 		rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
5836 				     tp->TxDescArray + entry);
5837 		if (status & LastFrag) {
5838 			pkts_compl++;
5839 			bytes_compl += tx_skb->skb->len;
5840 			napi_consume_skb(tx_skb->skb, budget);
5841 			tx_skb->skb = NULL;
5842 		}
5843 		dirty_tx++;
5844 		tx_left--;
5845 	}
5846 
5847 	if (tp->dirty_tx != dirty_tx) {
5848 		netdev_completed_queue(dev, pkts_compl, bytes_compl);
5849 
5850 		u64_stats_update_begin(&tp->tx_stats.syncp);
5851 		tp->tx_stats.packets += pkts_compl;
5852 		tp->tx_stats.bytes += bytes_compl;
5853 		u64_stats_update_end(&tp->tx_stats.syncp);
5854 
5855 		tp->dirty_tx = dirty_tx;
5856 		/* Sync with rtl8169_start_xmit:
5857 		 * - publish dirty_tx ring index (write barrier)
5858 		 * - refresh cur_tx ring index and queue status (read barrier)
5859 		 * May the current thread miss the stopped queue condition,
5860 		 * a racing xmit thread can only have a right view of the
5861 		 * ring status.
5862 		 */
5863 		smp_mb();
5864 		if (netif_queue_stopped(dev) &&
5865 		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
5866 			netif_wake_queue(dev);
5867 		}
5868 		/*
5869 		 * 8168 hack: TxPoll requests are lost when the Tx packets are
5870 		 * too close. Let's kick an extra TxPoll request when a burst
5871 		 * of start_xmit activity is detected (if it is not detected,
5872 		 * it is slow enough). -- FR
5873 		 */
5874 		if (tp->cur_tx != dirty_tx)
5875 			RTL_W8(tp, TxPoll, NPQ);
5876 	}
5877 }
5878 
5879 static inline int rtl8169_fragmented_frame(u32 status)
5880 {
5881 	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5882 }
5883 
5884 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5885 {
5886 	u32 status = opts1 & RxProtoMask;
5887 
5888 	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5889 	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5890 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5891 	else
5892 		skb_checksum_none_assert(skb);
5893 }
5894 
5895 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
5896 {
5897 	unsigned int cur_rx, rx_left;
5898 	unsigned int count;
5899 
5900 	cur_rx = tp->cur_rx;
5901 
5902 	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
5903 		unsigned int entry = cur_rx % NUM_RX_DESC;
5904 		struct RxDesc *desc = tp->RxDescArray + entry;
5905 		u32 status;
5906 
5907 		status = le32_to_cpu(desc->opts1);
5908 		if (status & DescOwn)
5909 			break;
5910 
5911 		/* This barrier is needed to keep us from reading
5912 		 * any other fields out of the Rx descriptor until
5913 		 * we know the status of DescOwn
5914 		 */
5915 		dma_rmb();
5916 
5917 		if (unlikely(status & RxRES)) {
5918 			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
5919 				   status);
5920 			dev->stats.rx_errors++;
5921 			if (status & (RxRWT | RxRUNT))
5922 				dev->stats.rx_length_errors++;
5923 			if (status & RxCRC)
5924 				dev->stats.rx_crc_errors++;
5925 			if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
5926 			    dev->features & NETIF_F_RXALL) {
5927 				goto process_pkt;
5928 			}
5929 		} else {
5930 			unsigned int pkt_size;
5931 			struct sk_buff *skb;
5932 
5933 process_pkt:
5934 			pkt_size = status & GENMASK(13, 0);
5935 			if (likely(!(dev->features & NETIF_F_RXFCS)))
5936 				pkt_size -= ETH_FCS_LEN;
5937 			/*
5938 			 * The driver does not support incoming fragmented
5939 			 * frames. They are seen as a symptom of over-mtu
5940 			 * sized frames.
5941 			 */
5942 			if (unlikely(rtl8169_fragmented_frame(status))) {
5943 				dev->stats.rx_dropped++;
5944 				dev->stats.rx_length_errors++;
5945 				goto release_descriptor;
5946 			}
5947 
5948 			dma_sync_single_for_cpu(tp_to_dev(tp),
5949 						le64_to_cpu(desc->addr),
5950 						pkt_size, DMA_FROM_DEVICE);
5951 
5952 			skb = napi_alloc_skb(&tp->napi, pkt_size);
5953 			if (unlikely(!skb)) {
5954 				dev->stats.rx_dropped++;
5955 				goto release_descriptor;
5956 			}
5957 
5958 			prefetch(tp->Rx_databuff[entry]);
5959 			skb_copy_to_linear_data(skb, tp->Rx_databuff[entry],
5960 						pkt_size);
5961 			skb->tail += pkt_size;
5962 			skb->len = pkt_size;
5963 
5964 			rtl8169_rx_csum(skb, status);
5965 			skb->protocol = eth_type_trans(skb, dev);
5966 
5967 			rtl8169_rx_vlan_tag(desc, skb);
5968 
5969 			if (skb->pkt_type == PACKET_MULTICAST)
5970 				dev->stats.multicast++;
5971 
5972 			napi_gro_receive(&tp->napi, skb);
5973 
5974 			u64_stats_update_begin(&tp->rx_stats.syncp);
5975 			tp->rx_stats.packets++;
5976 			tp->rx_stats.bytes += pkt_size;
5977 			u64_stats_update_end(&tp->rx_stats.syncp);
5978 		}
5979 release_descriptor:
5980 		desc->opts2 = 0;
5981 		rtl8169_mark_to_asic(desc);
5982 	}
5983 
5984 	count = cur_rx - tp->cur_rx;
5985 	tp->cur_rx = cur_rx;
5986 
5987 	return count;
5988 }
5989 
5990 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5991 {
5992 	struct rtl8169_private *tp = dev_instance;
5993 	u16 status = RTL_R16(tp, IntrStatus);
5994 
5995 	if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
5996 		return IRQ_NONE;
5997 
5998 	if (unlikely(status & SYSErr)) {
5999 		rtl8169_pcierr_interrupt(tp->dev);
6000 		goto out;
6001 	}
6002 
6003 	if (status & LinkChg)
6004 		phy_mac_interrupt(tp->phydev);
6005 
6006 	if (unlikely(status & RxFIFOOver &&
6007 	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
6008 		netif_stop_queue(tp->dev);
6009 		/* XXX - Hack alert. See rtl_task(). */
6010 		set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6011 	}
6012 
6013 	rtl_irq_disable(tp);
6014 	napi_schedule_irqoff(&tp->napi);
6015 out:
6016 	rtl_ack_events(tp, status);
6017 
6018 	return IRQ_HANDLED;
6019 }
6020 
6021 static void rtl_task(struct work_struct *work)
6022 {
6023 	static const struct {
6024 		int bitnr;
6025 		void (*action)(struct rtl8169_private *);
6026 	} rtl_work[] = {
6027 		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
6028 	};
6029 	struct rtl8169_private *tp =
6030 		container_of(work, struct rtl8169_private, wk.work);
6031 	struct net_device *dev = tp->dev;
6032 	int i;
6033 
6034 	rtl_lock_work(tp);
6035 
6036 	if (!netif_running(dev) ||
6037 	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6038 		goto out_unlock;
6039 
6040 	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6041 		bool pending;
6042 
6043 		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6044 		if (pending)
6045 			rtl_work[i].action(tp);
6046 	}
6047 
6048 out_unlock:
6049 	rtl_unlock_work(tp);
6050 }
6051 
6052 static int rtl8169_poll(struct napi_struct *napi, int budget)
6053 {
6054 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6055 	struct net_device *dev = tp->dev;
6056 	int work_done;
6057 
6058 	work_done = rtl_rx(dev, tp, (u32) budget);
6059 
6060 	rtl_tx(dev, tp, budget);
6061 
6062 	if (work_done < budget) {
6063 		napi_complete_done(napi, work_done);
6064 		rtl_irq_enable(tp);
6065 	}
6066 
6067 	return work_done;
6068 }
6069 
6070 static void rtl8169_rx_missed(struct net_device *dev)
6071 {
6072 	struct rtl8169_private *tp = netdev_priv(dev);
6073 
6074 	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6075 		return;
6076 
6077 	dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
6078 	RTL_W32(tp, RxMissed, 0);
6079 }
6080 
6081 static void r8169_phylink_handler(struct net_device *ndev)
6082 {
6083 	struct rtl8169_private *tp = netdev_priv(ndev);
6084 
6085 	if (netif_carrier_ok(ndev)) {
6086 		rtl_link_chg_patch(tp);
6087 		pm_request_resume(&tp->pci_dev->dev);
6088 	} else {
6089 		pm_runtime_idle(&tp->pci_dev->dev);
6090 	}
6091 
6092 	if (net_ratelimit())
6093 		phy_print_status(tp->phydev);
6094 }
6095 
6096 static int r8169_phy_connect(struct rtl8169_private *tp)
6097 {
6098 	struct phy_device *phydev = tp->phydev;
6099 	phy_interface_t phy_mode;
6100 	int ret;
6101 
6102 	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
6103 		   PHY_INTERFACE_MODE_MII;
6104 
6105 	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
6106 				 phy_mode);
6107 	if (ret)
6108 		return ret;
6109 
6110 	if (tp->supports_gmii)
6111 		phy_remove_link_mode(phydev,
6112 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
6113 	else
6114 		phy_set_max_speed(phydev, SPEED_100);
6115 
6116 	phy_support_asym_pause(phydev);
6117 
6118 	phy_attached_info(phydev);
6119 
6120 	return 0;
6121 }
6122 
6123 static void rtl8169_down(struct net_device *dev)
6124 {
6125 	struct rtl8169_private *tp = netdev_priv(dev);
6126 
6127 	phy_stop(tp->phydev);
6128 
6129 	napi_disable(&tp->napi);
6130 	netif_stop_queue(dev);
6131 
6132 	rtl8169_hw_reset(tp);
6133 	/*
6134 	 * At this point device interrupts can not be enabled in any function,
6135 	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6136 	 * and napi is disabled (rtl8169_poll).
6137 	 */
6138 	rtl8169_rx_missed(dev);
6139 
6140 	/* Give a racing hard_start_xmit a few cycles to complete. */
6141 	synchronize_rcu();
6142 
6143 	rtl8169_tx_clear(tp);
6144 
6145 	rtl8169_rx_clear(tp);
6146 
6147 	rtl_pll_power_down(tp);
6148 }
6149 
6150 static int rtl8169_close(struct net_device *dev)
6151 {
6152 	struct rtl8169_private *tp = netdev_priv(dev);
6153 	struct pci_dev *pdev = tp->pci_dev;
6154 
6155 	pm_runtime_get_sync(&pdev->dev);
6156 
6157 	/* Update counters before going down */
6158 	rtl8169_update_counters(tp);
6159 
6160 	rtl_lock_work(tp);
6161 	/* Clear all task flags */
6162 	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
6163 
6164 	rtl8169_down(dev);
6165 	rtl_unlock_work(tp);
6166 
6167 	cancel_work_sync(&tp->wk.work);
6168 
6169 	phy_disconnect(tp->phydev);
6170 
6171 	pci_free_irq(pdev, 0, tp);
6172 
6173 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6174 			  tp->RxPhyAddr);
6175 	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6176 			  tp->TxPhyAddr);
6177 	tp->TxDescArray = NULL;
6178 	tp->RxDescArray = NULL;
6179 
6180 	pm_runtime_put_sync(&pdev->dev);
6181 
6182 	return 0;
6183 }
6184 
6185 #ifdef CONFIG_NET_POLL_CONTROLLER
6186 static void rtl8169_netpoll(struct net_device *dev)
6187 {
6188 	struct rtl8169_private *tp = netdev_priv(dev);
6189 
6190 	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
6191 }
6192 #endif
6193 
6194 static int rtl_open(struct net_device *dev)
6195 {
6196 	struct rtl8169_private *tp = netdev_priv(dev);
6197 	struct pci_dev *pdev = tp->pci_dev;
6198 	int retval = -ENOMEM;
6199 
6200 	pm_runtime_get_sync(&pdev->dev);
6201 
6202 	/*
6203 	 * Rx and Tx descriptors needs 256 bytes alignment.
6204 	 * dma_alloc_coherent provides more.
6205 	 */
6206 	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6207 					     &tp->TxPhyAddr, GFP_KERNEL);
6208 	if (!tp->TxDescArray)
6209 		goto err_pm_runtime_put;
6210 
6211 	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6212 					     &tp->RxPhyAddr, GFP_KERNEL);
6213 	if (!tp->RxDescArray)
6214 		goto err_free_tx_0;
6215 
6216 	retval = rtl8169_init_ring(tp);
6217 	if (retval < 0)
6218 		goto err_free_rx_1;
6219 
6220 	rtl_request_firmware(tp);
6221 
6222 	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
6223 				 dev->name);
6224 	if (retval < 0)
6225 		goto err_release_fw_2;
6226 
6227 	retval = r8169_phy_connect(tp);
6228 	if (retval)
6229 		goto err_free_irq;
6230 
6231 	rtl_lock_work(tp);
6232 
6233 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6234 
6235 	napi_enable(&tp->napi);
6236 
6237 	rtl8169_init_phy(dev, tp);
6238 
6239 	rtl_pll_power_up(tp);
6240 
6241 	rtl_hw_start(tp);
6242 
6243 	if (!rtl8169_init_counter_offsets(tp))
6244 		netif_warn(tp, hw, dev, "counter reset/update failed\n");
6245 
6246 	phy_start(tp->phydev);
6247 	netif_start_queue(dev);
6248 
6249 	rtl_unlock_work(tp);
6250 
6251 	pm_runtime_put_sync(&pdev->dev);
6252 out:
6253 	return retval;
6254 
6255 err_free_irq:
6256 	pci_free_irq(pdev, 0, tp);
6257 err_release_fw_2:
6258 	rtl_release_firmware(tp);
6259 	rtl8169_rx_clear(tp);
6260 err_free_rx_1:
6261 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6262 			  tp->RxPhyAddr);
6263 	tp->RxDescArray = NULL;
6264 err_free_tx_0:
6265 	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6266 			  tp->TxPhyAddr);
6267 	tp->TxDescArray = NULL;
6268 err_pm_runtime_put:
6269 	pm_runtime_put_noidle(&pdev->dev);
6270 	goto out;
6271 }
6272 
6273 static void
6274 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6275 {
6276 	struct rtl8169_private *tp = netdev_priv(dev);
6277 	struct pci_dev *pdev = tp->pci_dev;
6278 	struct rtl8169_counters *counters = tp->counters;
6279 	unsigned int start;
6280 
6281 	pm_runtime_get_noresume(&pdev->dev);
6282 
6283 	if (netif_running(dev) && pm_runtime_active(&pdev->dev))
6284 		rtl8169_rx_missed(dev);
6285 
6286 	do {
6287 		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
6288 		stats->rx_packets = tp->rx_stats.packets;
6289 		stats->rx_bytes	= tp->rx_stats.bytes;
6290 	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
6291 
6292 	do {
6293 		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
6294 		stats->tx_packets = tp->tx_stats.packets;
6295 		stats->tx_bytes	= tp->tx_stats.bytes;
6296 	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
6297 
6298 	stats->rx_dropped	= dev->stats.rx_dropped;
6299 	stats->tx_dropped	= dev->stats.tx_dropped;
6300 	stats->rx_length_errors = dev->stats.rx_length_errors;
6301 	stats->rx_errors	= dev->stats.rx_errors;
6302 	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
6303 	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
6304 	stats->rx_missed_errors = dev->stats.rx_missed_errors;
6305 	stats->multicast	= dev->stats.multicast;
6306 
6307 	/*
6308 	 * Fetch additional counter values missing in stats collected by driver
6309 	 * from tally counters.
6310 	 */
6311 	if (pm_runtime_active(&pdev->dev))
6312 		rtl8169_update_counters(tp);
6313 
6314 	/*
6315 	 * Subtract values fetched during initalization.
6316 	 * See rtl8169_init_counter_offsets for a description why we do that.
6317 	 */
6318 	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
6319 		le64_to_cpu(tp->tc_offset.tx_errors);
6320 	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
6321 		le32_to_cpu(tp->tc_offset.tx_multi_collision);
6322 	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
6323 		le16_to_cpu(tp->tc_offset.tx_aborted);
6324 
6325 	pm_runtime_put_noidle(&pdev->dev);
6326 }
6327 
6328 static void rtl8169_net_suspend(struct net_device *dev)
6329 {
6330 	struct rtl8169_private *tp = netdev_priv(dev);
6331 
6332 	if (!netif_running(dev))
6333 		return;
6334 
6335 	phy_stop(tp->phydev);
6336 	netif_device_detach(dev);
6337 
6338 	rtl_lock_work(tp);
6339 	napi_disable(&tp->napi);
6340 	/* Clear all task flags */
6341 	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
6342 
6343 	rtl_unlock_work(tp);
6344 
6345 	rtl_pll_power_down(tp);
6346 }
6347 
6348 #ifdef CONFIG_PM
6349 
6350 static int rtl8169_suspend(struct device *device)
6351 {
6352 	struct net_device *dev = dev_get_drvdata(device);
6353 	struct rtl8169_private *tp = netdev_priv(dev);
6354 
6355 	rtl8169_net_suspend(dev);
6356 	clk_disable_unprepare(tp->clk);
6357 
6358 	return 0;
6359 }
6360 
6361 static void __rtl8169_resume(struct net_device *dev)
6362 {
6363 	struct rtl8169_private *tp = netdev_priv(dev);
6364 
6365 	netif_device_attach(dev);
6366 
6367 	rtl_pll_power_up(tp);
6368 	rtl8169_init_phy(dev, tp);
6369 
6370 	phy_start(tp->phydev);
6371 
6372 	rtl_lock_work(tp);
6373 	napi_enable(&tp->napi);
6374 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6375 	rtl_reset_work(tp);
6376 	rtl_unlock_work(tp);
6377 }
6378 
6379 static int rtl8169_resume(struct device *device)
6380 {
6381 	struct net_device *dev = dev_get_drvdata(device);
6382 	struct rtl8169_private *tp = netdev_priv(dev);
6383 
6384 	rtl_rar_set(tp, dev->dev_addr);
6385 
6386 	clk_prepare_enable(tp->clk);
6387 
6388 	if (netif_running(dev))
6389 		__rtl8169_resume(dev);
6390 
6391 	return 0;
6392 }
6393 
6394 static int rtl8169_runtime_suspend(struct device *device)
6395 {
6396 	struct net_device *dev = dev_get_drvdata(device);
6397 	struct rtl8169_private *tp = netdev_priv(dev);
6398 
6399 	if (!tp->TxDescArray)
6400 		return 0;
6401 
6402 	rtl_lock_work(tp);
6403 	__rtl8169_set_wol(tp, WAKE_ANY);
6404 	rtl_unlock_work(tp);
6405 
6406 	rtl8169_net_suspend(dev);
6407 
6408 	/* Update counters before going runtime suspend */
6409 	rtl8169_rx_missed(dev);
6410 	rtl8169_update_counters(tp);
6411 
6412 	return 0;
6413 }
6414 
6415 static int rtl8169_runtime_resume(struct device *device)
6416 {
6417 	struct net_device *dev = dev_get_drvdata(device);
6418 	struct rtl8169_private *tp = netdev_priv(dev);
6419 
6420 	rtl_rar_set(tp, dev->dev_addr);
6421 
6422 	if (!tp->TxDescArray)
6423 		return 0;
6424 
6425 	rtl_lock_work(tp);
6426 	__rtl8169_set_wol(tp, tp->saved_wolopts);
6427 	rtl_unlock_work(tp);
6428 
6429 	__rtl8169_resume(dev);
6430 
6431 	return 0;
6432 }
6433 
6434 static int rtl8169_runtime_idle(struct device *device)
6435 {
6436 	struct net_device *dev = dev_get_drvdata(device);
6437 
6438 	if (!netif_running(dev) || !netif_carrier_ok(dev))
6439 		pm_schedule_suspend(device, 10000);
6440 
6441 	return -EBUSY;
6442 }
6443 
6444 static const struct dev_pm_ops rtl8169_pm_ops = {
6445 	.suspend		= rtl8169_suspend,
6446 	.resume			= rtl8169_resume,
6447 	.freeze			= rtl8169_suspend,
6448 	.thaw			= rtl8169_resume,
6449 	.poweroff		= rtl8169_suspend,
6450 	.restore		= rtl8169_resume,
6451 	.runtime_suspend	= rtl8169_runtime_suspend,
6452 	.runtime_resume		= rtl8169_runtime_resume,
6453 	.runtime_idle		= rtl8169_runtime_idle,
6454 };
6455 
6456 #define RTL8169_PM_OPS	(&rtl8169_pm_ops)
6457 
6458 #else /* !CONFIG_PM */
6459 
6460 #define RTL8169_PM_OPS	NULL
6461 
6462 #endif /* !CONFIG_PM */
6463 
6464 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6465 {
6466 	/* WoL fails with 8168b when the receiver is disabled. */
6467 	switch (tp->mac_version) {
6468 	case RTL_GIGA_MAC_VER_11:
6469 	case RTL_GIGA_MAC_VER_12:
6470 	case RTL_GIGA_MAC_VER_17:
6471 		pci_clear_master(tp->pci_dev);
6472 
6473 		RTL_W8(tp, ChipCmd, CmdRxEnb);
6474 		/* PCI commit */
6475 		RTL_R8(tp, ChipCmd);
6476 		break;
6477 	default:
6478 		break;
6479 	}
6480 }
6481 
6482 static void rtl_shutdown(struct pci_dev *pdev)
6483 {
6484 	struct net_device *dev = pci_get_drvdata(pdev);
6485 	struct rtl8169_private *tp = netdev_priv(dev);
6486 
6487 	rtl8169_net_suspend(dev);
6488 
6489 	/* Restore original MAC address */
6490 	rtl_rar_set(tp, dev->perm_addr);
6491 
6492 	rtl8169_hw_reset(tp);
6493 
6494 	if (system_state == SYSTEM_POWER_OFF) {
6495 		if (tp->saved_wolopts) {
6496 			rtl_wol_suspend_quirk(tp);
6497 			rtl_wol_shutdown_quirk(tp);
6498 		}
6499 
6500 		pci_wake_from_d3(pdev, true);
6501 		pci_set_power_state(pdev, PCI_D3hot);
6502 	}
6503 }
6504 
6505 static void rtl_remove_one(struct pci_dev *pdev)
6506 {
6507 	struct net_device *dev = pci_get_drvdata(pdev);
6508 	struct rtl8169_private *tp = netdev_priv(dev);
6509 
6510 	if (r8168_check_dash(tp))
6511 		rtl8168_driver_stop(tp);
6512 
6513 	netif_napi_del(&tp->napi);
6514 
6515 	unregister_netdev(dev);
6516 	mdiobus_unregister(tp->phydev->mdio.bus);
6517 
6518 	rtl_release_firmware(tp);
6519 
6520 	if (pci_dev_run_wake(pdev))
6521 		pm_runtime_get_noresume(&pdev->dev);
6522 
6523 	/* restore original MAC address */
6524 	rtl_rar_set(tp, dev->perm_addr);
6525 }
6526 
6527 static const struct net_device_ops rtl_netdev_ops = {
6528 	.ndo_open		= rtl_open,
6529 	.ndo_stop		= rtl8169_close,
6530 	.ndo_get_stats64	= rtl8169_get_stats64,
6531 	.ndo_start_xmit		= rtl8169_start_xmit,
6532 	.ndo_features_check	= rtl8169_features_check,
6533 	.ndo_tx_timeout		= rtl8169_tx_timeout,
6534 	.ndo_validate_addr	= eth_validate_addr,
6535 	.ndo_change_mtu		= rtl8169_change_mtu,
6536 	.ndo_fix_features	= rtl8169_fix_features,
6537 	.ndo_set_features	= rtl8169_set_features,
6538 	.ndo_set_mac_address	= rtl_set_mac_address,
6539 	.ndo_do_ioctl		= rtl8169_ioctl,
6540 	.ndo_set_rx_mode	= rtl_set_rx_mode,
6541 #ifdef CONFIG_NET_POLL_CONTROLLER
6542 	.ndo_poll_controller	= rtl8169_netpoll,
6543 #endif
6544 
6545 };
6546 
6547 static void rtl_set_irq_mask(struct rtl8169_private *tp)
6548 {
6549 	tp->irq_mask = RTL_EVENT_NAPI | LinkChg;
6550 
6551 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6552 		tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
6553 	else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
6554 		/* special workaround needed */
6555 		tp->irq_mask |= RxFIFOOver;
6556 	else
6557 		tp->irq_mask |= RxOverflow;
6558 }
6559 
6560 static int rtl_alloc_irq(struct rtl8169_private *tp)
6561 {
6562 	unsigned int flags;
6563 
6564 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
6565 		rtl_unlock_config_regs(tp);
6566 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
6567 		rtl_lock_config_regs(tp);
6568 		flags = PCI_IRQ_LEGACY;
6569 	} else {
6570 		flags = PCI_IRQ_ALL_TYPES;
6571 	}
6572 
6573 	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
6574 }
6575 
6576 static void rtl_read_mac_address(struct rtl8169_private *tp,
6577 				 u8 mac_addr[ETH_ALEN])
6578 {
6579 	/* Get MAC address */
6580 	if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
6581 		u32 value = rtl_eri_read(tp, 0xe0);
6582 
6583 		mac_addr[0] = (value >>  0) & 0xff;
6584 		mac_addr[1] = (value >>  8) & 0xff;
6585 		mac_addr[2] = (value >> 16) & 0xff;
6586 		mac_addr[3] = (value >> 24) & 0xff;
6587 
6588 		value = rtl_eri_read(tp, 0xe4);
6589 		mac_addr[4] = (value >>  0) & 0xff;
6590 		mac_addr[5] = (value >>  8) & 0xff;
6591 	}
6592 }
6593 
6594 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6595 {
6596 	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
6597 }
6598 
6599 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6600 {
6601 	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
6602 }
6603 
6604 static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
6605 {
6606 	struct rtl8169_private *tp = mii_bus->priv;
6607 
6608 	if (phyaddr > 0)
6609 		return -ENODEV;
6610 
6611 	return rtl_readphy(tp, phyreg);
6612 }
6613 
6614 static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
6615 				int phyreg, u16 val)
6616 {
6617 	struct rtl8169_private *tp = mii_bus->priv;
6618 
6619 	if (phyaddr > 0)
6620 		return -ENODEV;
6621 
6622 	rtl_writephy(tp, phyreg, val);
6623 
6624 	return 0;
6625 }
6626 
6627 static int r8169_mdio_register(struct rtl8169_private *tp)
6628 {
6629 	struct pci_dev *pdev = tp->pci_dev;
6630 	struct mii_bus *new_bus;
6631 	int ret;
6632 
6633 	new_bus = devm_mdiobus_alloc(&pdev->dev);
6634 	if (!new_bus)
6635 		return -ENOMEM;
6636 
6637 	new_bus->name = "r8169";
6638 	new_bus->priv = tp;
6639 	new_bus->parent = &pdev->dev;
6640 	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
6641 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
6642 
6643 	new_bus->read = r8169_mdio_read_reg;
6644 	new_bus->write = r8169_mdio_write_reg;
6645 
6646 	ret = mdiobus_register(new_bus);
6647 	if (ret)
6648 		return ret;
6649 
6650 	tp->phydev = mdiobus_get_phy(new_bus, 0);
6651 	if (!tp->phydev) {
6652 		mdiobus_unregister(new_bus);
6653 		return -ENODEV;
6654 	}
6655 
6656 	/* PHY will be woken up in rtl_open() */
6657 	phy_suspend(tp->phydev);
6658 
6659 	return 0;
6660 }
6661 
6662 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6663 {
6664 	u32 data;
6665 
6666 	tp->ocp_base = OCP_STD_PHY_BASE;
6667 
6668 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
6669 
6670 	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6671 		return;
6672 
6673 	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6674 		return;
6675 
6676 	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6677 	msleep(1);
6678 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
6679 
6680 	data = r8168_mac_ocp_read(tp, 0xe8de);
6681 	data &= ~(1 << 14);
6682 	r8168_mac_ocp_write(tp, 0xe8de, data);
6683 
6684 	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6685 		return;
6686 
6687 	data = r8168_mac_ocp_read(tp, 0xe8de);
6688 	data |= (1 << 15);
6689 	r8168_mac_ocp_write(tp, 0xe8de, data);
6690 
6691 	rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
6692 }
6693 
6694 static void rtl_hw_initialize(struct rtl8169_private *tp)
6695 {
6696 	switch (tp->mac_version) {
6697 	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
6698 		rtl8168ep_stop_cmac(tp);
6699 		/* fall through */
6700 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
6701 		rtl_hw_init_8168g(tp);
6702 		break;
6703 	default:
6704 		break;
6705 	}
6706 }
6707 
6708 static int rtl_jumbo_max(struct rtl8169_private *tp)
6709 {
6710 	/* Non-GBit versions don't support jumbo frames */
6711 	if (!tp->supports_gmii)
6712 		return JUMBO_1K;
6713 
6714 	switch (tp->mac_version) {
6715 	/* RTL8169 */
6716 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
6717 		return JUMBO_7K;
6718 	/* RTL8168b */
6719 	case RTL_GIGA_MAC_VER_11:
6720 	case RTL_GIGA_MAC_VER_12:
6721 	case RTL_GIGA_MAC_VER_17:
6722 		return JUMBO_4K;
6723 	/* RTL8168c */
6724 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
6725 		return JUMBO_6K;
6726 	default:
6727 		return JUMBO_9K;
6728 	}
6729 }
6730 
6731 static void rtl_disable_clk(void *data)
6732 {
6733 	clk_disable_unprepare(data);
6734 }
6735 
6736 static int rtl_get_ether_clk(struct rtl8169_private *tp)
6737 {
6738 	struct device *d = tp_to_dev(tp);
6739 	struct clk *clk;
6740 	int rc;
6741 
6742 	clk = devm_clk_get(d, "ether_clk");
6743 	if (IS_ERR(clk)) {
6744 		rc = PTR_ERR(clk);
6745 		if (rc == -ENOENT)
6746 			/* clk-core allows NULL (for suspend / resume) */
6747 			rc = 0;
6748 		else if (rc != -EPROBE_DEFER)
6749 			dev_err(d, "failed to get clk: %d\n", rc);
6750 	} else {
6751 		tp->clk = clk;
6752 		rc = clk_prepare_enable(clk);
6753 		if (rc)
6754 			dev_err(d, "failed to enable clk: %d\n", rc);
6755 		else
6756 			rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
6757 	}
6758 
6759 	return rc;
6760 }
6761 
6762 static void rtl_init_mac_address(struct rtl8169_private *tp)
6763 {
6764 	struct net_device *dev = tp->dev;
6765 	u8 *mac_addr = dev->dev_addr;
6766 	int rc, i;
6767 
6768 	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
6769 	if (!rc)
6770 		goto done;
6771 
6772 	rtl_read_mac_address(tp, mac_addr);
6773 	if (is_valid_ether_addr(mac_addr))
6774 		goto done;
6775 
6776 	for (i = 0; i < ETH_ALEN; i++)
6777 		mac_addr[i] = RTL_R8(tp, MAC0 + i);
6778 	if (is_valid_ether_addr(mac_addr))
6779 		goto done;
6780 
6781 	eth_hw_addr_random(dev);
6782 	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
6783 done:
6784 	rtl_rar_set(tp, mac_addr);
6785 }
6786 
6787 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6788 {
6789 	struct rtl8169_private *tp;
6790 	struct net_device *dev;
6791 	int chipset, region;
6792 	int jumbo_max, rc;
6793 
6794 	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
6795 	if (!dev)
6796 		return -ENOMEM;
6797 
6798 	SET_NETDEV_DEV(dev, &pdev->dev);
6799 	dev->netdev_ops = &rtl_netdev_ops;
6800 	tp = netdev_priv(dev);
6801 	tp->dev = dev;
6802 	tp->pci_dev = pdev;
6803 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6804 	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
6805 
6806 	/* Get the *optional* external "ether_clk" used on some boards */
6807 	rc = rtl_get_ether_clk(tp);
6808 	if (rc)
6809 		return rc;
6810 
6811 	/* Disable ASPM completely as that cause random device stop working
6812 	 * problems as well as full system hangs for some PCIe devices users.
6813 	 */
6814 	rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
6815 					  PCIE_LINK_STATE_L1);
6816 	tp->aspm_manageable = !rc;
6817 
6818 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
6819 	rc = pcim_enable_device(pdev);
6820 	if (rc < 0) {
6821 		dev_err(&pdev->dev, "enable failure\n");
6822 		return rc;
6823 	}
6824 
6825 	if (pcim_set_mwi(pdev) < 0)
6826 		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
6827 
6828 	/* use first MMIO region */
6829 	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
6830 	if (region < 0) {
6831 		dev_err(&pdev->dev, "no MMIO resource found\n");
6832 		return -ENODEV;
6833 	}
6834 
6835 	/* check for weird/broken PCI region reporting */
6836 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6837 		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
6838 		return -ENODEV;
6839 	}
6840 
6841 	rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
6842 	if (rc < 0) {
6843 		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
6844 		return rc;
6845 	}
6846 
6847 	tp->mmio_addr = pcim_iomap_table(pdev)[region];
6848 
6849 	/* Identify chip attached to board */
6850 	rtl8169_get_mac_version(tp);
6851 	if (tp->mac_version == RTL_GIGA_MAC_NONE)
6852 		return -ENODEV;
6853 
6854 	tp->cp_cmd = RTL_R16(tp, CPlusCmd);
6855 
6856 	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
6857 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
6858 		dev->features |= NETIF_F_HIGHDMA;
6859 
6860 	rtl_init_rxcfg(tp);
6861 
6862 	rtl8169_irq_mask_and_ack(tp);
6863 
6864 	rtl_hw_initialize(tp);
6865 
6866 	rtl_hw_reset(tp);
6867 
6868 	pci_set_master(pdev);
6869 
6870 	chipset = tp->mac_version;
6871 
6872 	rc = rtl_alloc_irq(tp);
6873 	if (rc < 0) {
6874 		dev_err(&pdev->dev, "Can't allocate interrupt\n");
6875 		return rc;
6876 	}
6877 
6878 	mutex_init(&tp->wk.mutex);
6879 	INIT_WORK(&tp->wk.work, rtl_task);
6880 	u64_stats_init(&tp->rx_stats.syncp);
6881 	u64_stats_init(&tp->tx_stats.syncp);
6882 
6883 	rtl_init_mac_address(tp);
6884 
6885 	dev->ethtool_ops = &rtl8169_ethtool_ops;
6886 
6887 	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
6888 
6889 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6890 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
6891 		NETIF_F_HW_VLAN_CTAG_RX;
6892 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6893 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
6894 		NETIF_F_HW_VLAN_CTAG_RX;
6895 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6896 		NETIF_F_HIGHDMA;
6897 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
6898 
6899 	tp->cp_cmd |= RxChkSum | RxVlan;
6900 
6901 	/*
6902 	 * Pretend we are using VLANs; This bypasses a nasty bug where
6903 	 * Interrupts stop flowing on high load on 8110SCd controllers.
6904 	 */
6905 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6906 		/* Disallow toggling */
6907 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
6908 
6909 	if (rtl_chip_supports_csum_v2(tp)) {
6910 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
6911 		dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
6912 		dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
6913 		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
6914 	} else {
6915 		dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
6916 		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
6917 	}
6918 
6919 	/* RTL8168e-vl has a HW issue with TSO */
6920 	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
6921 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
6922 		dev->hw_features &= ~NETIF_F_ALL_TSO;
6923 		dev->features &= ~NETIF_F_ALL_TSO;
6924 	}
6925 
6926 	dev->hw_features |= NETIF_F_RXALL;
6927 	dev->hw_features |= NETIF_F_RXFCS;
6928 
6929 	/* MTU range: 60 - hw-specific max */
6930 	dev->min_mtu = ETH_ZLEN;
6931 	jumbo_max = rtl_jumbo_max(tp);
6932 	dev->max_mtu = jumbo_max;
6933 
6934 	rtl_set_irq_mask(tp);
6935 
6936 	tp->fw_name = rtl_chip_infos[chipset].fw_name;
6937 
6938 	tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
6939 					    &tp->counters_phys_addr,
6940 					    GFP_KERNEL);
6941 	if (!tp->counters)
6942 		return -ENOMEM;
6943 
6944 	pci_set_drvdata(pdev, dev);
6945 
6946 	rc = r8169_mdio_register(tp);
6947 	if (rc)
6948 		return rc;
6949 
6950 	/* chip gets powered up in rtl_open() */
6951 	rtl_pll_power_down(tp);
6952 
6953 	rc = register_netdev(dev);
6954 	if (rc)
6955 		goto err_mdio_unregister;
6956 
6957 	netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
6958 		   rtl_chip_infos[chipset].name, dev->dev_addr,
6959 		   (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
6960 		   pci_irq_vector(pdev, 0));
6961 
6962 	if (jumbo_max > JUMBO_1K)
6963 		netif_info(tp, probe, dev,
6964 			   "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
6965 			   jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
6966 			   "ok" : "ko");
6967 
6968 	if (r8168_check_dash(tp))
6969 		rtl8168_driver_start(tp);
6970 
6971 	if (pci_dev_run_wake(pdev))
6972 		pm_runtime_put_sync(&pdev->dev);
6973 
6974 	return 0;
6975 
6976 err_mdio_unregister:
6977 	mdiobus_unregister(tp->phydev->mdio.bus);
6978 	return rc;
6979 }
6980 
6981 static struct pci_driver rtl8169_pci_driver = {
6982 	.name		= MODULENAME,
6983 	.id_table	= rtl8169_pci_tbl,
6984 	.probe		= rtl_init_one,
6985 	.remove		= rtl_remove_one,
6986 	.shutdown	= rtl_shutdown,
6987 	.driver.pm	= RTL8169_PM_OPS,
6988 };
6989 
6990 module_pci_driver(rtl8169_pci_driver);
6991