xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision bcda5fd3)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
50 #define DEFAULT_BULK_IN_DELAY		(0x0800)
51 #define MAX_SINGLE_PACKET_SIZE		(9000)
52 #define DEFAULT_TX_CSUM_ENABLE		(true)
53 #define DEFAULT_RX_CSUM_ENABLE		(true)
54 #define DEFAULT_TSO_CSUM_ENABLE		(true)
55 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
56 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
57 #define TX_OVERHEAD			(8)
58 #define RXW_PADDING			2
59 
60 #define LAN78XX_USB_VENDOR_ID		(0x0424)
61 #define LAN7800_USB_PRODUCT_ID		(0x7800)
62 #define LAN7850_USB_PRODUCT_ID		(0x7850)
63 #define LAN7801_USB_PRODUCT_ID		(0x7801)
64 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
65 #define LAN78XX_OTP_MAGIC		(0x78F3)
66 
67 #define	MII_READ			1
68 #define	MII_WRITE			0
69 
70 #define EEPROM_INDICATOR		(0xA5)
71 #define EEPROM_MAC_OFFSET		(0x01)
72 #define MAX_EEPROM_SIZE			512
73 #define OTP_INDICATOR_1			(0xF3)
74 #define OTP_INDICATOR_2			(0xF7)
75 
76 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
77 					 WAKE_MCAST | WAKE_BCAST | \
78 					 WAKE_ARP | WAKE_MAGIC)
79 
80 /* USB related defines */
81 #define BULK_IN_PIPE			1
82 #define BULK_OUT_PIPE			2
83 
84 /* default autosuspend delay (mSec)*/
85 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
86 
87 /* statistic update interval (mSec) */
88 #define STAT_UPDATE_TIMER		(1 * 1000)
89 
90 /* defines interrupts from interrupt EP */
91 #define MAX_INT_EP			(32)
92 #define INT_EP_INTEP			(31)
93 #define INT_EP_OTP_WR_DONE		(28)
94 #define INT_EP_EEE_TX_LPI_START		(26)
95 #define INT_EP_EEE_TX_LPI_STOP		(25)
96 #define INT_EP_EEE_RX_LPI		(24)
97 #define INT_EP_MAC_RESET_TIMEOUT	(23)
98 #define INT_EP_RDFO			(22)
99 #define INT_EP_TXE			(21)
100 #define INT_EP_USB_STATUS		(20)
101 #define INT_EP_TX_DIS			(19)
102 #define INT_EP_RX_DIS			(18)
103 #define INT_EP_PHY			(17)
104 #define INT_EP_DP			(16)
105 #define INT_EP_MAC_ERR			(15)
106 #define INT_EP_TDFU			(14)
107 #define INT_EP_TDFO			(13)
108 #define INT_EP_UTX			(12)
109 #define INT_EP_GPIO_11			(11)
110 #define INT_EP_GPIO_10			(10)
111 #define INT_EP_GPIO_9			(9)
112 #define INT_EP_GPIO_8			(8)
113 #define INT_EP_GPIO_7			(7)
114 #define INT_EP_GPIO_6			(6)
115 #define INT_EP_GPIO_5			(5)
116 #define INT_EP_GPIO_4			(4)
117 #define INT_EP_GPIO_3			(3)
118 #define INT_EP_GPIO_2			(2)
119 #define INT_EP_GPIO_1			(1)
120 #define INT_EP_GPIO_0			(0)
121 
122 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
123 	"RX FCS Errors",
124 	"RX Alignment Errors",
125 	"Rx Fragment Errors",
126 	"RX Jabber Errors",
127 	"RX Undersize Frame Errors",
128 	"RX Oversize Frame Errors",
129 	"RX Dropped Frames",
130 	"RX Unicast Byte Count",
131 	"RX Broadcast Byte Count",
132 	"RX Multicast Byte Count",
133 	"RX Unicast Frames",
134 	"RX Broadcast Frames",
135 	"RX Multicast Frames",
136 	"RX Pause Frames",
137 	"RX 64 Byte Frames",
138 	"RX 65 - 127 Byte Frames",
139 	"RX 128 - 255 Byte Frames",
140 	"RX 256 - 511 Bytes Frames",
141 	"RX 512 - 1023 Byte Frames",
142 	"RX 1024 - 1518 Byte Frames",
143 	"RX Greater 1518 Byte Frames",
144 	"EEE RX LPI Transitions",
145 	"EEE RX LPI Time",
146 	"TX FCS Errors",
147 	"TX Excess Deferral Errors",
148 	"TX Carrier Errors",
149 	"TX Bad Byte Count",
150 	"TX Single Collisions",
151 	"TX Multiple Collisions",
152 	"TX Excessive Collision",
153 	"TX Late Collisions",
154 	"TX Unicast Byte Count",
155 	"TX Broadcast Byte Count",
156 	"TX Multicast Byte Count",
157 	"TX Unicast Frames",
158 	"TX Broadcast Frames",
159 	"TX Multicast Frames",
160 	"TX Pause Frames",
161 	"TX 64 Byte Frames",
162 	"TX 65 - 127 Byte Frames",
163 	"TX 128 - 255 Byte Frames",
164 	"TX 256 - 511 Bytes Frames",
165 	"TX 512 - 1023 Byte Frames",
166 	"TX 1024 - 1518 Byte Frames",
167 	"TX Greater 1518 Byte Frames",
168 	"EEE TX LPI Transitions",
169 	"EEE TX LPI Time",
170 };
171 
172 struct lan78xx_statstage {
173 	u32 rx_fcs_errors;
174 	u32 rx_alignment_errors;
175 	u32 rx_fragment_errors;
176 	u32 rx_jabber_errors;
177 	u32 rx_undersize_frame_errors;
178 	u32 rx_oversize_frame_errors;
179 	u32 rx_dropped_frames;
180 	u32 rx_unicast_byte_count;
181 	u32 rx_broadcast_byte_count;
182 	u32 rx_multicast_byte_count;
183 	u32 rx_unicast_frames;
184 	u32 rx_broadcast_frames;
185 	u32 rx_multicast_frames;
186 	u32 rx_pause_frames;
187 	u32 rx_64_byte_frames;
188 	u32 rx_65_127_byte_frames;
189 	u32 rx_128_255_byte_frames;
190 	u32 rx_256_511_bytes_frames;
191 	u32 rx_512_1023_byte_frames;
192 	u32 rx_1024_1518_byte_frames;
193 	u32 rx_greater_1518_byte_frames;
194 	u32 eee_rx_lpi_transitions;
195 	u32 eee_rx_lpi_time;
196 	u32 tx_fcs_errors;
197 	u32 tx_excess_deferral_errors;
198 	u32 tx_carrier_errors;
199 	u32 tx_bad_byte_count;
200 	u32 tx_single_collisions;
201 	u32 tx_multiple_collisions;
202 	u32 tx_excessive_collision;
203 	u32 tx_late_collisions;
204 	u32 tx_unicast_byte_count;
205 	u32 tx_broadcast_byte_count;
206 	u32 tx_multicast_byte_count;
207 	u32 tx_unicast_frames;
208 	u32 tx_broadcast_frames;
209 	u32 tx_multicast_frames;
210 	u32 tx_pause_frames;
211 	u32 tx_64_byte_frames;
212 	u32 tx_65_127_byte_frames;
213 	u32 tx_128_255_byte_frames;
214 	u32 tx_256_511_bytes_frames;
215 	u32 tx_512_1023_byte_frames;
216 	u32 tx_1024_1518_byte_frames;
217 	u32 tx_greater_1518_byte_frames;
218 	u32 eee_tx_lpi_transitions;
219 	u32 eee_tx_lpi_time;
220 };
221 
222 struct lan78xx_statstage64 {
223 	u64 rx_fcs_errors;
224 	u64 rx_alignment_errors;
225 	u64 rx_fragment_errors;
226 	u64 rx_jabber_errors;
227 	u64 rx_undersize_frame_errors;
228 	u64 rx_oversize_frame_errors;
229 	u64 rx_dropped_frames;
230 	u64 rx_unicast_byte_count;
231 	u64 rx_broadcast_byte_count;
232 	u64 rx_multicast_byte_count;
233 	u64 rx_unicast_frames;
234 	u64 rx_broadcast_frames;
235 	u64 rx_multicast_frames;
236 	u64 rx_pause_frames;
237 	u64 rx_64_byte_frames;
238 	u64 rx_65_127_byte_frames;
239 	u64 rx_128_255_byte_frames;
240 	u64 rx_256_511_bytes_frames;
241 	u64 rx_512_1023_byte_frames;
242 	u64 rx_1024_1518_byte_frames;
243 	u64 rx_greater_1518_byte_frames;
244 	u64 eee_rx_lpi_transitions;
245 	u64 eee_rx_lpi_time;
246 	u64 tx_fcs_errors;
247 	u64 tx_excess_deferral_errors;
248 	u64 tx_carrier_errors;
249 	u64 tx_bad_byte_count;
250 	u64 tx_single_collisions;
251 	u64 tx_multiple_collisions;
252 	u64 tx_excessive_collision;
253 	u64 tx_late_collisions;
254 	u64 tx_unicast_byte_count;
255 	u64 tx_broadcast_byte_count;
256 	u64 tx_multicast_byte_count;
257 	u64 tx_unicast_frames;
258 	u64 tx_broadcast_frames;
259 	u64 tx_multicast_frames;
260 	u64 tx_pause_frames;
261 	u64 tx_64_byte_frames;
262 	u64 tx_65_127_byte_frames;
263 	u64 tx_128_255_byte_frames;
264 	u64 tx_256_511_bytes_frames;
265 	u64 tx_512_1023_byte_frames;
266 	u64 tx_1024_1518_byte_frames;
267 	u64 tx_greater_1518_byte_frames;
268 	u64 eee_tx_lpi_transitions;
269 	u64 eee_tx_lpi_time;
270 };
271 
272 static u32 lan78xx_regs[] = {
273 	ID_REV,
274 	INT_STS,
275 	HW_CFG,
276 	PMT_CTL,
277 	E2P_CMD,
278 	E2P_DATA,
279 	USB_STATUS,
280 	VLAN_TYPE,
281 	MAC_CR,
282 	MAC_RX,
283 	MAC_TX,
284 	FLOW,
285 	ERR_STS,
286 	MII_ACC,
287 	MII_DATA,
288 	EEE_TX_LPI_REQ_DLY,
289 	EEE_TW_TX_SYS,
290 	EEE_TX_LPI_REM_DLY,
291 	WUCSR
292 };
293 
294 #define PHY_REG_SIZE (32 * sizeof(u32))
295 
296 struct lan78xx_net;
297 
298 struct lan78xx_priv {
299 	struct lan78xx_net *dev;
300 	u32 rfe_ctl;
301 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
302 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
303 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
304 	struct mutex dataport_mutex; /* for dataport access */
305 	spinlock_t rfe_ctl_lock; /* for rfe register access */
306 	struct work_struct set_multicast;
307 	struct work_struct set_vlan;
308 	u32 wol;
309 };
310 
311 enum skb_state {
312 	illegal = 0,
313 	tx_start,
314 	tx_done,
315 	rx_start,
316 	rx_done,
317 	rx_cleanup,
318 	unlink_start
319 };
320 
321 struct skb_data {		/* skb->cb is one of these */
322 	struct urb *urb;
323 	struct lan78xx_net *dev;
324 	enum skb_state state;
325 	size_t length;
326 	int num_of_packet;
327 };
328 
329 struct usb_context {
330 	struct usb_ctrlrequest req;
331 	struct lan78xx_net *dev;
332 };
333 
334 #define EVENT_TX_HALT			0
335 #define EVENT_RX_HALT			1
336 #define EVENT_RX_MEMORY			2
337 #define EVENT_STS_SPLIT			3
338 #define EVENT_LINK_RESET		4
339 #define EVENT_RX_PAUSED			5
340 #define EVENT_DEV_WAKING		6
341 #define EVENT_DEV_ASLEEP		7
342 #define EVENT_DEV_OPEN			8
343 #define EVENT_STAT_UPDATE		9
344 
345 struct statstage {
346 	struct mutex			access_lock;	/* for stats access */
347 	struct lan78xx_statstage	saved;
348 	struct lan78xx_statstage	rollover_count;
349 	struct lan78xx_statstage	rollover_max;
350 	struct lan78xx_statstage64	curr_stat;
351 };
352 
353 struct irq_domain_data {
354 	struct irq_domain	*irqdomain;
355 	unsigned int		phyirq;
356 	struct irq_chip		*irqchip;
357 	irq_flow_handler_t	irq_handler;
358 	u32			irqenable;
359 	struct mutex		irq_lock;		/* for irq bus access */
360 };
361 
362 struct lan78xx_net {
363 	struct net_device	*net;
364 	struct usb_device	*udev;
365 	struct usb_interface	*intf;
366 	void			*driver_priv;
367 
368 	int			rx_qlen;
369 	int			tx_qlen;
370 	struct sk_buff_head	rxq;
371 	struct sk_buff_head	txq;
372 	struct sk_buff_head	done;
373 	struct sk_buff_head	rxq_pause;
374 	struct sk_buff_head	txq_pend;
375 
376 	struct tasklet_struct	bh;
377 	struct delayed_work	wq;
378 
379 	int			msg_enable;
380 
381 	struct urb		*urb_intr;
382 	struct usb_anchor	deferred;
383 
384 	struct mutex		phy_mutex; /* for phy access */
385 	unsigned		pipe_in, pipe_out, pipe_intr;
386 
387 	u32			hard_mtu;	/* count any extra framing */
388 	size_t			rx_urb_size;	/* size for rx urbs */
389 
390 	unsigned long		flags;
391 
392 	wait_queue_head_t	*wait;
393 	unsigned char		suspend_count;
394 
395 	unsigned		maxpacket;
396 	struct timer_list	delay;
397 	struct timer_list	stat_monitor;
398 
399 	unsigned long		data[5];
400 
401 	int			link_on;
402 	u8			mdix_ctrl;
403 
404 	u32			chipid;
405 	u32			chiprev;
406 	struct mii_bus		*mdiobus;
407 	phy_interface_t		interface;
408 
409 	int			fc_autoneg;
410 	u8			fc_request_control;
411 
412 	int			delta;
413 	struct statstage	stats;
414 
415 	struct irq_domain_data	domain_data;
416 };
417 
418 /* define external phy id */
419 #define	PHY_LAN8835			(0x0007C130)
420 #define	PHY_KSZ9031RNX			(0x00221620)
421 
422 /* use ethtool to change the level for any given device */
423 static int msg_level = -1;
424 module_param(msg_level, int, 0);
425 MODULE_PARM_DESC(msg_level, "Override default message level");
426 
427 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
428 {
429 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
430 	int ret;
431 
432 	if (!buf)
433 		return -ENOMEM;
434 
435 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
436 			      USB_VENDOR_REQUEST_READ_REGISTER,
437 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
438 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
439 	if (likely(ret >= 0)) {
440 		le32_to_cpus(buf);
441 		*data = *buf;
442 	} else {
443 		netdev_warn(dev->net,
444 			    "Failed to read register index 0x%08x. ret = %d",
445 			    index, ret);
446 	}
447 
448 	kfree(buf);
449 
450 	return ret;
451 }
452 
453 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
454 {
455 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
456 	int ret;
457 
458 	if (!buf)
459 		return -ENOMEM;
460 
461 	*buf = data;
462 	cpu_to_le32s(buf);
463 
464 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
465 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
466 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
467 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
468 	if (unlikely(ret < 0)) {
469 		netdev_warn(dev->net,
470 			    "Failed to write register index 0x%08x. ret = %d",
471 			    index, ret);
472 	}
473 
474 	kfree(buf);
475 
476 	return ret;
477 }
478 
479 static int lan78xx_read_stats(struct lan78xx_net *dev,
480 			      struct lan78xx_statstage *data)
481 {
482 	int ret = 0;
483 	int i;
484 	struct lan78xx_statstage *stats;
485 	u32 *src;
486 	u32 *dst;
487 
488 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
489 	if (!stats)
490 		return -ENOMEM;
491 
492 	ret = usb_control_msg(dev->udev,
493 			      usb_rcvctrlpipe(dev->udev, 0),
494 			      USB_VENDOR_REQUEST_GET_STATS,
495 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
496 			      0,
497 			      0,
498 			      (void *)stats,
499 			      sizeof(*stats),
500 			      USB_CTRL_SET_TIMEOUT);
501 	if (likely(ret >= 0)) {
502 		src = (u32 *)stats;
503 		dst = (u32 *)data;
504 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
505 			le32_to_cpus(&src[i]);
506 			dst[i] = src[i];
507 		}
508 	} else {
509 		netdev_warn(dev->net,
510 			    "Failed to read stat ret = %d", ret);
511 	}
512 
513 	kfree(stats);
514 
515 	return ret;
516 }
517 
518 #define check_counter_rollover(struct1, dev_stats, member) {	\
519 	if (struct1->member < dev_stats.saved.member)		\
520 		dev_stats.rollover_count.member++;		\
521 	}
522 
523 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
524 					struct lan78xx_statstage *stats)
525 {
526 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
527 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
528 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
529 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
530 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
531 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
532 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
533 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
534 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
535 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
536 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
537 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
538 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
539 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
540 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
541 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
542 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
543 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
544 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
545 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
546 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
547 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
548 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
549 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
550 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
551 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
552 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
553 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
554 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
555 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
556 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
557 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
558 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
559 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
560 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
561 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
562 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
563 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
564 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
565 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
566 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
567 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
568 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
569 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
570 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
571 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
572 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
573 
574 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
575 }
576 
577 static void lan78xx_update_stats(struct lan78xx_net *dev)
578 {
579 	u32 *p, *count, *max;
580 	u64 *data;
581 	int i;
582 	struct lan78xx_statstage lan78xx_stats;
583 
584 	if (usb_autopm_get_interface(dev->intf) < 0)
585 		return;
586 
587 	p = (u32 *)&lan78xx_stats;
588 	count = (u32 *)&dev->stats.rollover_count;
589 	max = (u32 *)&dev->stats.rollover_max;
590 	data = (u64 *)&dev->stats.curr_stat;
591 
592 	mutex_lock(&dev->stats.access_lock);
593 
594 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
595 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
596 
597 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
598 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
599 
600 	mutex_unlock(&dev->stats.access_lock);
601 
602 	usb_autopm_put_interface(dev->intf);
603 }
604 
605 /* Loop until the read is completed with timeout called with phy_mutex held */
606 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
607 {
608 	unsigned long start_time = jiffies;
609 	u32 val;
610 	int ret;
611 
612 	do {
613 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
614 		if (unlikely(ret < 0))
615 			return -EIO;
616 
617 		if (!(val & MII_ACC_MII_BUSY_))
618 			return 0;
619 	} while (!time_after(jiffies, start_time + HZ));
620 
621 	return -EIO;
622 }
623 
624 static inline u32 mii_access(int id, int index, int read)
625 {
626 	u32 ret;
627 
628 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
629 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
630 	if (read)
631 		ret |= MII_ACC_MII_READ_;
632 	else
633 		ret |= MII_ACC_MII_WRITE_;
634 	ret |= MII_ACC_MII_BUSY_;
635 
636 	return ret;
637 }
638 
639 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
640 {
641 	unsigned long start_time = jiffies;
642 	u32 val;
643 	int ret;
644 
645 	do {
646 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
647 		if (unlikely(ret < 0))
648 			return -EIO;
649 
650 		if (!(val & E2P_CMD_EPC_BUSY_) ||
651 		    (val & E2P_CMD_EPC_TIMEOUT_))
652 			break;
653 		usleep_range(40, 100);
654 	} while (!time_after(jiffies, start_time + HZ));
655 
656 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
657 		netdev_warn(dev->net, "EEPROM read operation timeout");
658 		return -EIO;
659 	}
660 
661 	return 0;
662 }
663 
664 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
665 {
666 	unsigned long start_time = jiffies;
667 	u32 val;
668 	int ret;
669 
670 	do {
671 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
672 		if (unlikely(ret < 0))
673 			return -EIO;
674 
675 		if (!(val & E2P_CMD_EPC_BUSY_))
676 			return 0;
677 
678 		usleep_range(40, 100);
679 	} while (!time_after(jiffies, start_time + HZ));
680 
681 	netdev_warn(dev->net, "EEPROM is busy");
682 	return -EIO;
683 }
684 
685 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
686 				   u32 length, u8 *data)
687 {
688 	u32 val;
689 	u32 saved;
690 	int i, ret;
691 	int retval;
692 
693 	/* depends on chip, some EEPROM pins are muxed with LED function.
694 	 * disable & restore LED function to access EEPROM.
695 	 */
696 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
697 	saved = val;
698 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
699 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
700 		ret = lan78xx_write_reg(dev, HW_CFG, val);
701 	}
702 
703 	retval = lan78xx_eeprom_confirm_not_busy(dev);
704 	if (retval)
705 		return retval;
706 
707 	for (i = 0; i < length; i++) {
708 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
709 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
710 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
711 		if (unlikely(ret < 0)) {
712 			retval = -EIO;
713 			goto exit;
714 		}
715 
716 		retval = lan78xx_wait_eeprom(dev);
717 		if (retval < 0)
718 			goto exit;
719 
720 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
721 		if (unlikely(ret < 0)) {
722 			retval = -EIO;
723 			goto exit;
724 		}
725 
726 		data[i] = val & 0xFF;
727 		offset++;
728 	}
729 
730 	retval = 0;
731 exit:
732 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
733 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
734 
735 	return retval;
736 }
737 
738 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
739 			       u32 length, u8 *data)
740 {
741 	u8 sig;
742 	int ret;
743 
744 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
745 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
746 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
747 	else
748 		ret = -EINVAL;
749 
750 	return ret;
751 }
752 
753 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
754 				    u32 length, u8 *data)
755 {
756 	u32 val;
757 	u32 saved;
758 	int i, ret;
759 	int retval;
760 
761 	/* depends on chip, some EEPROM pins are muxed with LED function.
762 	 * disable & restore LED function to access EEPROM.
763 	 */
764 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
765 	saved = val;
766 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
767 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
768 		ret = lan78xx_write_reg(dev, HW_CFG, val);
769 	}
770 
771 	retval = lan78xx_eeprom_confirm_not_busy(dev);
772 	if (retval)
773 		goto exit;
774 
775 	/* Issue write/erase enable command */
776 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
777 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
778 	if (unlikely(ret < 0)) {
779 		retval = -EIO;
780 		goto exit;
781 	}
782 
783 	retval = lan78xx_wait_eeprom(dev);
784 	if (retval < 0)
785 		goto exit;
786 
787 	for (i = 0; i < length; i++) {
788 		/* Fill data register */
789 		val = data[i];
790 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
791 		if (ret < 0) {
792 			retval = -EIO;
793 			goto exit;
794 		}
795 
796 		/* Send "write" command */
797 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
798 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
799 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
800 		if (ret < 0) {
801 			retval = -EIO;
802 			goto exit;
803 		}
804 
805 		retval = lan78xx_wait_eeprom(dev);
806 		if (retval < 0)
807 			goto exit;
808 
809 		offset++;
810 	}
811 
812 	retval = 0;
813 exit:
814 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
815 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
816 
817 	return retval;
818 }
819 
820 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
821 				u32 length, u8 *data)
822 {
823 	int i;
824 	u32 buf;
825 	unsigned long timeout;
826 
827 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
828 
829 	if (buf & OTP_PWR_DN_PWRDN_N_) {
830 		/* clear it and wait to be cleared */
831 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
832 
833 		timeout = jiffies + HZ;
834 		do {
835 			usleep_range(1, 10);
836 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
837 			if (time_after(jiffies, timeout)) {
838 				netdev_warn(dev->net,
839 					    "timeout on OTP_PWR_DN");
840 				return -EIO;
841 			}
842 		} while (buf & OTP_PWR_DN_PWRDN_N_);
843 	}
844 
845 	for (i = 0; i < length; i++) {
846 		lan78xx_write_reg(dev, OTP_ADDR1,
847 					((offset + i) >> 8) & OTP_ADDR1_15_11);
848 		lan78xx_write_reg(dev, OTP_ADDR2,
849 					((offset + i) & OTP_ADDR2_10_3));
850 
851 		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
852 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
853 
854 		timeout = jiffies + HZ;
855 		do {
856 			udelay(1);
857 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
858 			if (time_after(jiffies, timeout)) {
859 				netdev_warn(dev->net,
860 					    "timeout on OTP_STATUS");
861 				return -EIO;
862 			}
863 		} while (buf & OTP_STATUS_BUSY_);
864 
865 		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
866 
867 		data[i] = (u8)(buf & 0xFF);
868 	}
869 
870 	return 0;
871 }
872 
873 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
874 				 u32 length, u8 *data)
875 {
876 	int i;
877 	u32 buf;
878 	unsigned long timeout;
879 
880 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
881 
882 	if (buf & OTP_PWR_DN_PWRDN_N_) {
883 		/* clear it and wait to be cleared */
884 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
885 
886 		timeout = jiffies + HZ;
887 		do {
888 			udelay(1);
889 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
890 			if (time_after(jiffies, timeout)) {
891 				netdev_warn(dev->net,
892 					    "timeout on OTP_PWR_DN completion");
893 				return -EIO;
894 			}
895 		} while (buf & OTP_PWR_DN_PWRDN_N_);
896 	}
897 
898 	/* set to BYTE program mode */
899 	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
900 
901 	for (i = 0; i < length; i++) {
902 		lan78xx_write_reg(dev, OTP_ADDR1,
903 					((offset + i) >> 8) & OTP_ADDR1_15_11);
904 		lan78xx_write_reg(dev, OTP_ADDR2,
905 					((offset + i) & OTP_ADDR2_10_3));
906 		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
907 		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
908 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
909 
910 		timeout = jiffies + HZ;
911 		do {
912 			udelay(1);
913 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
914 			if (time_after(jiffies, timeout)) {
915 				netdev_warn(dev->net,
916 					    "Timeout on OTP_STATUS completion");
917 				return -EIO;
918 			}
919 		} while (buf & OTP_STATUS_BUSY_);
920 	}
921 
922 	return 0;
923 }
924 
925 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
926 			    u32 length, u8 *data)
927 {
928 	u8 sig;
929 	int ret;
930 
931 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
932 
933 	if (ret == 0) {
934 		if (sig == OTP_INDICATOR_2)
935 			offset += 0x100;
936 		else if (sig != OTP_INDICATOR_1)
937 			ret = -EINVAL;
938 		if (!ret)
939 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
940 	}
941 
942 	return ret;
943 }
944 
945 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
946 {
947 	int i, ret;
948 
949 	for (i = 0; i < 100; i++) {
950 		u32 dp_sel;
951 
952 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
953 		if (unlikely(ret < 0))
954 			return -EIO;
955 
956 		if (dp_sel & DP_SEL_DPRDY_)
957 			return 0;
958 
959 		usleep_range(40, 100);
960 	}
961 
962 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
963 
964 	return -EIO;
965 }
966 
967 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
968 				  u32 addr, u32 length, u32 *buf)
969 {
970 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
971 	u32 dp_sel;
972 	int i, ret;
973 
974 	if (usb_autopm_get_interface(dev->intf) < 0)
975 			return 0;
976 
977 	mutex_lock(&pdata->dataport_mutex);
978 
979 	ret = lan78xx_dataport_wait_not_busy(dev);
980 	if (ret < 0)
981 		goto done;
982 
983 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
984 
985 	dp_sel &= ~DP_SEL_RSEL_MASK_;
986 	dp_sel |= ram_select;
987 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
988 
989 	for (i = 0; i < length; i++) {
990 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
991 
992 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
993 
994 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
995 
996 		ret = lan78xx_dataport_wait_not_busy(dev);
997 		if (ret < 0)
998 			goto done;
999 	}
1000 
1001 done:
1002 	mutex_unlock(&pdata->dataport_mutex);
1003 	usb_autopm_put_interface(dev->intf);
1004 
1005 	return ret;
1006 }
1007 
1008 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1009 				    int index, u8 addr[ETH_ALEN])
1010 {
1011 	u32 temp;
1012 
1013 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1014 		temp = addr[3];
1015 		temp = addr[2] | (temp << 8);
1016 		temp = addr[1] | (temp << 8);
1017 		temp = addr[0] | (temp << 8);
1018 		pdata->pfilter_table[index][1] = temp;
1019 		temp = addr[5];
1020 		temp = addr[4] | (temp << 8);
1021 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1022 		pdata->pfilter_table[index][0] = temp;
1023 	}
1024 }
1025 
1026 /* returns hash bit number for given MAC address */
1027 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1028 {
1029 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1030 }
1031 
1032 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1033 {
1034 	struct lan78xx_priv *pdata =
1035 			container_of(param, struct lan78xx_priv, set_multicast);
1036 	struct lan78xx_net *dev = pdata->dev;
1037 	int i;
1038 
1039 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1040 		  pdata->rfe_ctl);
1041 
1042 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1043 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1044 
1045 	for (i = 1; i < NUM_OF_MAF; i++) {
1046 		lan78xx_write_reg(dev, MAF_HI(i), 0);
1047 		lan78xx_write_reg(dev, MAF_LO(i),
1048 					pdata->pfilter_table[i][1]);
1049 		lan78xx_write_reg(dev, MAF_HI(i),
1050 					pdata->pfilter_table[i][0]);
1051 	}
1052 
1053 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1054 }
1055 
1056 static void lan78xx_set_multicast(struct net_device *netdev)
1057 {
1058 	struct lan78xx_net *dev = netdev_priv(netdev);
1059 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1060 	unsigned long flags;
1061 	int i;
1062 
1063 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1064 
1065 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1066 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1067 
1068 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1069 			pdata->mchash_table[i] = 0;
1070 	/* pfilter_table[0] has own HW address */
1071 	for (i = 1; i < NUM_OF_MAF; i++) {
1072 			pdata->pfilter_table[i][0] =
1073 			pdata->pfilter_table[i][1] = 0;
1074 	}
1075 
1076 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1077 
1078 	if (dev->net->flags & IFF_PROMISC) {
1079 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1080 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1081 	} else {
1082 		if (dev->net->flags & IFF_ALLMULTI) {
1083 			netif_dbg(dev, drv, dev->net,
1084 				  "receive all multicast enabled");
1085 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1086 		}
1087 	}
1088 
1089 	if (netdev_mc_count(dev->net)) {
1090 		struct netdev_hw_addr *ha;
1091 		int i;
1092 
1093 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1094 
1095 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1096 
1097 		i = 1;
1098 		netdev_for_each_mc_addr(ha, netdev) {
1099 			/* set first 32 into Perfect Filter */
1100 			if (i < 33) {
1101 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1102 			} else {
1103 				u32 bitnum = lan78xx_hash(ha->addr);
1104 
1105 				pdata->mchash_table[bitnum / 32] |=
1106 							(1 << (bitnum % 32));
1107 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1108 			}
1109 			i++;
1110 		}
1111 	}
1112 
1113 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1114 
1115 	/* defer register writes to a sleepable context */
1116 	schedule_work(&pdata->set_multicast);
1117 }
1118 
1119 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1120 				      u16 lcladv, u16 rmtadv)
1121 {
1122 	u32 flow = 0, fct_flow = 0;
1123 	u8 cap;
1124 
1125 	if (dev->fc_autoneg)
1126 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1127 	else
1128 		cap = dev->fc_request_control;
1129 
1130 	if (cap & FLOW_CTRL_TX)
1131 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1132 
1133 	if (cap & FLOW_CTRL_RX)
1134 		flow |= FLOW_CR_RX_FCEN_;
1135 
1136 	if (dev->udev->speed == USB_SPEED_SUPER)
1137 		fct_flow = 0x817;
1138 	else if (dev->udev->speed == USB_SPEED_HIGH)
1139 		fct_flow = 0x211;
1140 
1141 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1142 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1143 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1144 
1145 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1146 
1147 	/* threshold value should be set before enabling flow */
1148 	lan78xx_write_reg(dev, FLOW, flow);
1149 
1150 	return 0;
1151 }
1152 
1153 static int lan78xx_link_reset(struct lan78xx_net *dev)
1154 {
1155 	struct phy_device *phydev = dev->net->phydev;
1156 	struct ethtool_link_ksettings ecmd;
1157 	int ladv, radv, ret, link;
1158 	u32 buf;
1159 
1160 	/* clear LAN78xx interrupt status */
1161 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1162 	if (unlikely(ret < 0))
1163 		return -EIO;
1164 
1165 	mutex_lock(&phydev->lock);
1166 	phy_read_status(phydev);
1167 	link = phydev->link;
1168 	mutex_unlock(&phydev->lock);
1169 
1170 	if (!link && dev->link_on) {
1171 		dev->link_on = false;
1172 
1173 		/* reset MAC */
1174 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1175 		if (unlikely(ret < 0))
1176 			return -EIO;
1177 		buf |= MAC_CR_RST_;
1178 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1179 		if (unlikely(ret < 0))
1180 			return -EIO;
1181 
1182 		del_timer(&dev->stat_monitor);
1183 	} else if (link && !dev->link_on) {
1184 		dev->link_on = true;
1185 
1186 		phy_ethtool_ksettings_get(phydev, &ecmd);
1187 
1188 		if (dev->udev->speed == USB_SPEED_SUPER) {
1189 			if (ecmd.base.speed == 1000) {
1190 				/* disable U2 */
1191 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1192 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1193 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1194 				/* enable U1 */
1195 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1196 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1197 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1198 			} else {
1199 				/* enable U1 & U2 */
1200 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1201 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1202 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1203 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1204 			}
1205 		}
1206 
1207 		ladv = phy_read(phydev, MII_ADVERTISE);
1208 		if (ladv < 0)
1209 			return ladv;
1210 
1211 		radv = phy_read(phydev, MII_LPA);
1212 		if (radv < 0)
1213 			return radv;
1214 
1215 		netif_dbg(dev, link, dev->net,
1216 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1217 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1218 
1219 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1220 						 radv);
1221 
1222 		if (!timer_pending(&dev->stat_monitor)) {
1223 			dev->delta = 1;
1224 			mod_timer(&dev->stat_monitor,
1225 				  jiffies + STAT_UPDATE_TIMER);
1226 		}
1227 
1228 		tasklet_schedule(&dev->bh);
1229 	}
1230 
1231 	return ret;
1232 }
1233 
1234 /* some work can't be done in tasklets, so we use keventd
1235  *
1236  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1237  * but tasklet_schedule() doesn't.	hope the failure is rare.
1238  */
1239 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1240 {
1241 	set_bit(work, &dev->flags);
1242 	if (!schedule_delayed_work(&dev->wq, 0))
1243 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1244 }
1245 
1246 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1247 {
1248 	u32 intdata;
1249 
1250 	if (urb->actual_length != 4) {
1251 		netdev_warn(dev->net,
1252 			    "unexpected urb length %d", urb->actual_length);
1253 		return;
1254 	}
1255 
1256 	intdata = get_unaligned_le32(urb->transfer_buffer);
1257 
1258 	if (intdata & INT_ENP_PHY_INT) {
1259 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1260 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1261 
1262 		if (dev->domain_data.phyirq > 0) {
1263 			local_irq_disable();
1264 			generic_handle_irq(dev->domain_data.phyirq);
1265 			local_irq_enable();
1266 		}
1267 	} else
1268 		netdev_warn(dev->net,
1269 			    "unexpected interrupt: 0x%08x\n", intdata);
1270 }
1271 
1272 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1273 {
1274 	return MAX_EEPROM_SIZE;
1275 }
1276 
1277 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1278 				      struct ethtool_eeprom *ee, u8 *data)
1279 {
1280 	struct lan78xx_net *dev = netdev_priv(netdev);
1281 	int ret;
1282 
1283 	ret = usb_autopm_get_interface(dev->intf);
1284 	if (ret)
1285 		return ret;
1286 
1287 	ee->magic = LAN78XX_EEPROM_MAGIC;
1288 
1289 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1290 
1291 	usb_autopm_put_interface(dev->intf);
1292 
1293 	return ret;
1294 }
1295 
1296 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1297 				      struct ethtool_eeprom *ee, u8 *data)
1298 {
1299 	struct lan78xx_net *dev = netdev_priv(netdev);
1300 	int ret;
1301 
1302 	ret = usb_autopm_get_interface(dev->intf);
1303 	if (ret)
1304 		return ret;
1305 
1306 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1307 	 * to load data from EEPROM
1308 	 */
1309 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1310 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1311 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1312 		 (ee->offset == 0) &&
1313 		 (ee->len == 512) &&
1314 		 (data[0] == OTP_INDICATOR_1))
1315 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1316 
1317 	usb_autopm_put_interface(dev->intf);
1318 
1319 	return ret;
1320 }
1321 
1322 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1323 				u8 *data)
1324 {
1325 	if (stringset == ETH_SS_STATS)
1326 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1327 }
1328 
1329 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1330 {
1331 	if (sset == ETH_SS_STATS)
1332 		return ARRAY_SIZE(lan78xx_gstrings);
1333 	else
1334 		return -EOPNOTSUPP;
1335 }
1336 
1337 static void lan78xx_get_stats(struct net_device *netdev,
1338 			      struct ethtool_stats *stats, u64 *data)
1339 {
1340 	struct lan78xx_net *dev = netdev_priv(netdev);
1341 
1342 	lan78xx_update_stats(dev);
1343 
1344 	mutex_lock(&dev->stats.access_lock);
1345 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1346 	mutex_unlock(&dev->stats.access_lock);
1347 }
1348 
1349 static void lan78xx_get_wol(struct net_device *netdev,
1350 			    struct ethtool_wolinfo *wol)
1351 {
1352 	struct lan78xx_net *dev = netdev_priv(netdev);
1353 	int ret;
1354 	u32 buf;
1355 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1356 
1357 	if (usb_autopm_get_interface(dev->intf) < 0)
1358 			return;
1359 
1360 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1361 	if (unlikely(ret < 0)) {
1362 		wol->supported = 0;
1363 		wol->wolopts = 0;
1364 	} else {
1365 		if (buf & USB_CFG_RMT_WKP_) {
1366 			wol->supported = WAKE_ALL;
1367 			wol->wolopts = pdata->wol;
1368 		} else {
1369 			wol->supported = 0;
1370 			wol->wolopts = 0;
1371 		}
1372 	}
1373 
1374 	usb_autopm_put_interface(dev->intf);
1375 }
1376 
1377 static int lan78xx_set_wol(struct net_device *netdev,
1378 			   struct ethtool_wolinfo *wol)
1379 {
1380 	struct lan78xx_net *dev = netdev_priv(netdev);
1381 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1382 	int ret;
1383 
1384 	ret = usb_autopm_get_interface(dev->intf);
1385 	if (ret < 0)
1386 		return ret;
1387 
1388 	if (wol->wolopts & ~WAKE_ALL)
1389 		return -EINVAL;
1390 
1391 	pdata->wol = wol->wolopts;
1392 
1393 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1394 
1395 	phy_ethtool_set_wol(netdev->phydev, wol);
1396 
1397 	usb_autopm_put_interface(dev->intf);
1398 
1399 	return ret;
1400 }
1401 
1402 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1403 {
1404 	struct lan78xx_net *dev = netdev_priv(net);
1405 	struct phy_device *phydev = net->phydev;
1406 	int ret;
1407 	u32 buf;
1408 
1409 	ret = usb_autopm_get_interface(dev->intf);
1410 	if (ret < 0)
1411 		return ret;
1412 
1413 	ret = phy_ethtool_get_eee(phydev, edata);
1414 	if (ret < 0)
1415 		goto exit;
1416 
1417 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1418 	if (buf & MAC_CR_EEE_EN_) {
1419 		edata->eee_enabled = true;
1420 		edata->eee_active = !!(edata->advertised &
1421 				       edata->lp_advertised);
1422 		edata->tx_lpi_enabled = true;
1423 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1424 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1425 		edata->tx_lpi_timer = buf;
1426 	} else {
1427 		edata->eee_enabled = false;
1428 		edata->eee_active = false;
1429 		edata->tx_lpi_enabled = false;
1430 		edata->tx_lpi_timer = 0;
1431 	}
1432 
1433 	ret = 0;
1434 exit:
1435 	usb_autopm_put_interface(dev->intf);
1436 
1437 	return ret;
1438 }
1439 
1440 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1441 {
1442 	struct lan78xx_net *dev = netdev_priv(net);
1443 	int ret;
1444 	u32 buf;
1445 
1446 	ret = usb_autopm_get_interface(dev->intf);
1447 	if (ret < 0)
1448 		return ret;
1449 
1450 	if (edata->eee_enabled) {
1451 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1452 		buf |= MAC_CR_EEE_EN_;
1453 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1454 
1455 		phy_ethtool_set_eee(net->phydev, edata);
1456 
1457 		buf = (u32)edata->tx_lpi_timer;
1458 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1459 	} else {
1460 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1461 		buf &= ~MAC_CR_EEE_EN_;
1462 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1463 	}
1464 
1465 	usb_autopm_put_interface(dev->intf);
1466 
1467 	return 0;
1468 }
1469 
1470 static u32 lan78xx_get_link(struct net_device *net)
1471 {
1472 	u32 link;
1473 
1474 	mutex_lock(&net->phydev->lock);
1475 	phy_read_status(net->phydev);
1476 	link = net->phydev->link;
1477 	mutex_unlock(&net->phydev->lock);
1478 
1479 	return link;
1480 }
1481 
1482 static void lan78xx_get_drvinfo(struct net_device *net,
1483 				struct ethtool_drvinfo *info)
1484 {
1485 	struct lan78xx_net *dev = netdev_priv(net);
1486 
1487 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1488 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1489 }
1490 
1491 static u32 lan78xx_get_msglevel(struct net_device *net)
1492 {
1493 	struct lan78xx_net *dev = netdev_priv(net);
1494 
1495 	return dev->msg_enable;
1496 }
1497 
1498 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1499 {
1500 	struct lan78xx_net *dev = netdev_priv(net);
1501 
1502 	dev->msg_enable = level;
1503 }
1504 
1505 static int lan78xx_get_link_ksettings(struct net_device *net,
1506 				      struct ethtool_link_ksettings *cmd)
1507 {
1508 	struct lan78xx_net *dev = netdev_priv(net);
1509 	struct phy_device *phydev = net->phydev;
1510 	int ret;
1511 
1512 	ret = usb_autopm_get_interface(dev->intf);
1513 	if (ret < 0)
1514 		return ret;
1515 
1516 	phy_ethtool_ksettings_get(phydev, cmd);
1517 
1518 	usb_autopm_put_interface(dev->intf);
1519 
1520 	return ret;
1521 }
1522 
1523 static int lan78xx_set_link_ksettings(struct net_device *net,
1524 				      const struct ethtool_link_ksettings *cmd)
1525 {
1526 	struct lan78xx_net *dev = netdev_priv(net);
1527 	struct phy_device *phydev = net->phydev;
1528 	int ret = 0;
1529 	int temp;
1530 
1531 	ret = usb_autopm_get_interface(dev->intf);
1532 	if (ret < 0)
1533 		return ret;
1534 
1535 	/* change speed & duplex */
1536 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1537 
1538 	if (!cmd->base.autoneg) {
1539 		/* force link down */
1540 		temp = phy_read(phydev, MII_BMCR);
1541 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1542 		mdelay(1);
1543 		phy_write(phydev, MII_BMCR, temp);
1544 	}
1545 
1546 	usb_autopm_put_interface(dev->intf);
1547 
1548 	return ret;
1549 }
1550 
1551 static void lan78xx_get_pause(struct net_device *net,
1552 			      struct ethtool_pauseparam *pause)
1553 {
1554 	struct lan78xx_net *dev = netdev_priv(net);
1555 	struct phy_device *phydev = net->phydev;
1556 	struct ethtool_link_ksettings ecmd;
1557 
1558 	phy_ethtool_ksettings_get(phydev, &ecmd);
1559 
1560 	pause->autoneg = dev->fc_autoneg;
1561 
1562 	if (dev->fc_request_control & FLOW_CTRL_TX)
1563 		pause->tx_pause = 1;
1564 
1565 	if (dev->fc_request_control & FLOW_CTRL_RX)
1566 		pause->rx_pause = 1;
1567 }
1568 
1569 static int lan78xx_set_pause(struct net_device *net,
1570 			     struct ethtool_pauseparam *pause)
1571 {
1572 	struct lan78xx_net *dev = netdev_priv(net);
1573 	struct phy_device *phydev = net->phydev;
1574 	struct ethtool_link_ksettings ecmd;
1575 	int ret;
1576 
1577 	phy_ethtool_ksettings_get(phydev, &ecmd);
1578 
1579 	if (pause->autoneg && !ecmd.base.autoneg) {
1580 		ret = -EINVAL;
1581 		goto exit;
1582 	}
1583 
1584 	dev->fc_request_control = 0;
1585 	if (pause->rx_pause)
1586 		dev->fc_request_control |= FLOW_CTRL_RX;
1587 
1588 	if (pause->tx_pause)
1589 		dev->fc_request_control |= FLOW_CTRL_TX;
1590 
1591 	if (ecmd.base.autoneg) {
1592 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1593 		u32 mii_adv;
1594 
1595 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1596 				   ecmd.link_modes.advertising);
1597 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1598 				   ecmd.link_modes.advertising);
1599 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1600 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1601 		linkmode_or(ecmd.link_modes.advertising, fc,
1602 			    ecmd.link_modes.advertising);
1603 
1604 		phy_ethtool_ksettings_set(phydev, &ecmd);
1605 	}
1606 
1607 	dev->fc_autoneg = pause->autoneg;
1608 
1609 	ret = 0;
1610 exit:
1611 	return ret;
1612 }
1613 
1614 static int lan78xx_get_regs_len(struct net_device *netdev)
1615 {
1616 	if (!netdev->phydev)
1617 		return (sizeof(lan78xx_regs));
1618 	else
1619 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1620 }
1621 
1622 static void
1623 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1624 		 void *buf)
1625 {
1626 	u32 *data = buf;
1627 	int i, j;
1628 	struct lan78xx_net *dev = netdev_priv(netdev);
1629 
1630 	/* Read Device/MAC registers */
1631 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1632 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1633 
1634 	if (!netdev->phydev)
1635 		return;
1636 
1637 	/* Read PHY registers */
1638 	for (j = 0; j < 32; i++, j++)
1639 		data[i] = phy_read(netdev->phydev, j);
1640 }
1641 
1642 static const struct ethtool_ops lan78xx_ethtool_ops = {
1643 	.get_link	= lan78xx_get_link,
1644 	.nway_reset	= phy_ethtool_nway_reset,
1645 	.get_drvinfo	= lan78xx_get_drvinfo,
1646 	.get_msglevel	= lan78xx_get_msglevel,
1647 	.set_msglevel	= lan78xx_set_msglevel,
1648 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1649 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1650 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1651 	.get_ethtool_stats = lan78xx_get_stats,
1652 	.get_sset_count = lan78xx_get_sset_count,
1653 	.get_strings	= lan78xx_get_strings,
1654 	.get_wol	= lan78xx_get_wol,
1655 	.set_wol	= lan78xx_set_wol,
1656 	.get_ts_info	= ethtool_op_get_ts_info,
1657 	.get_eee	= lan78xx_get_eee,
1658 	.set_eee	= lan78xx_set_eee,
1659 	.get_pauseparam	= lan78xx_get_pause,
1660 	.set_pauseparam	= lan78xx_set_pause,
1661 	.get_link_ksettings = lan78xx_get_link_ksettings,
1662 	.set_link_ksettings = lan78xx_set_link_ksettings,
1663 	.get_regs_len	= lan78xx_get_regs_len,
1664 	.get_regs	= lan78xx_get_regs,
1665 };
1666 
1667 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1668 {
1669 	u32 addr_lo, addr_hi;
1670 	u8 addr[6];
1671 
1672 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1673 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1674 
1675 	addr[0] = addr_lo & 0xFF;
1676 	addr[1] = (addr_lo >> 8) & 0xFF;
1677 	addr[2] = (addr_lo >> 16) & 0xFF;
1678 	addr[3] = (addr_lo >> 24) & 0xFF;
1679 	addr[4] = addr_hi & 0xFF;
1680 	addr[5] = (addr_hi >> 8) & 0xFF;
1681 
1682 	if (!is_valid_ether_addr(addr)) {
1683 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1684 			/* valid address present in Device Tree */
1685 			netif_dbg(dev, ifup, dev->net,
1686 				  "MAC address read from Device Tree");
1687 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1688 						 ETH_ALEN, addr) == 0) ||
1689 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1690 					      ETH_ALEN, addr) == 0)) &&
1691 			   is_valid_ether_addr(addr)) {
1692 			/* eeprom values are valid so use them */
1693 			netif_dbg(dev, ifup, dev->net,
1694 				  "MAC address read from EEPROM");
1695 		} else {
1696 			/* generate random MAC */
1697 			eth_random_addr(addr);
1698 			netif_dbg(dev, ifup, dev->net,
1699 				  "MAC address set to random addr");
1700 		}
1701 
1702 		addr_lo = addr[0] | (addr[1] << 8) |
1703 			  (addr[2] << 16) | (addr[3] << 24);
1704 		addr_hi = addr[4] | (addr[5] << 8);
1705 
1706 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1707 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1708 	}
1709 
1710 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1711 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1712 
1713 	ether_addr_copy(dev->net->dev_addr, addr);
1714 }
1715 
1716 /* MDIO read and write wrappers for phylib */
1717 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1718 {
1719 	struct lan78xx_net *dev = bus->priv;
1720 	u32 val, addr;
1721 	int ret;
1722 
1723 	ret = usb_autopm_get_interface(dev->intf);
1724 	if (ret < 0)
1725 		return ret;
1726 
1727 	mutex_lock(&dev->phy_mutex);
1728 
1729 	/* confirm MII not busy */
1730 	ret = lan78xx_phy_wait_not_busy(dev);
1731 	if (ret < 0)
1732 		goto done;
1733 
1734 	/* set the address, index & direction (read from PHY) */
1735 	addr = mii_access(phy_id, idx, MII_READ);
1736 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1737 
1738 	ret = lan78xx_phy_wait_not_busy(dev);
1739 	if (ret < 0)
1740 		goto done;
1741 
1742 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1743 
1744 	ret = (int)(val & 0xFFFF);
1745 
1746 done:
1747 	mutex_unlock(&dev->phy_mutex);
1748 	usb_autopm_put_interface(dev->intf);
1749 
1750 	return ret;
1751 }
1752 
1753 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1754 				 u16 regval)
1755 {
1756 	struct lan78xx_net *dev = bus->priv;
1757 	u32 val, addr;
1758 	int ret;
1759 
1760 	ret = usb_autopm_get_interface(dev->intf);
1761 	if (ret < 0)
1762 		return ret;
1763 
1764 	mutex_lock(&dev->phy_mutex);
1765 
1766 	/* confirm MII not busy */
1767 	ret = lan78xx_phy_wait_not_busy(dev);
1768 	if (ret < 0)
1769 		goto done;
1770 
1771 	val = (u32)regval;
1772 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1773 
1774 	/* set the address, index & direction (write to PHY) */
1775 	addr = mii_access(phy_id, idx, MII_WRITE);
1776 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1777 
1778 	ret = lan78xx_phy_wait_not_busy(dev);
1779 	if (ret < 0)
1780 		goto done;
1781 
1782 done:
1783 	mutex_unlock(&dev->phy_mutex);
1784 	usb_autopm_put_interface(dev->intf);
1785 	return 0;
1786 }
1787 
1788 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1789 {
1790 	struct device_node *node;
1791 	int ret;
1792 
1793 	dev->mdiobus = mdiobus_alloc();
1794 	if (!dev->mdiobus) {
1795 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1796 		return -ENOMEM;
1797 	}
1798 
1799 	dev->mdiobus->priv = (void *)dev;
1800 	dev->mdiobus->read = lan78xx_mdiobus_read;
1801 	dev->mdiobus->write = lan78xx_mdiobus_write;
1802 	dev->mdiobus->name = "lan78xx-mdiobus";
1803 	dev->mdiobus->parent = &dev->udev->dev;
1804 
1805 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1806 		 dev->udev->bus->busnum, dev->udev->devnum);
1807 
1808 	switch (dev->chipid) {
1809 	case ID_REV_CHIP_ID_7800_:
1810 	case ID_REV_CHIP_ID_7850_:
1811 		/* set to internal PHY id */
1812 		dev->mdiobus->phy_mask = ~(1 << 1);
1813 		break;
1814 	case ID_REV_CHIP_ID_7801_:
1815 		/* scan thru PHYAD[2..0] */
1816 		dev->mdiobus->phy_mask = ~(0xFF);
1817 		break;
1818 	}
1819 
1820 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1821 	ret = of_mdiobus_register(dev->mdiobus, node);
1822 	of_node_put(node);
1823 	if (ret) {
1824 		netdev_err(dev->net, "can't register MDIO bus\n");
1825 		goto exit1;
1826 	}
1827 
1828 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1829 	return 0;
1830 exit1:
1831 	mdiobus_free(dev->mdiobus);
1832 	return ret;
1833 }
1834 
1835 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1836 {
1837 	mdiobus_unregister(dev->mdiobus);
1838 	mdiobus_free(dev->mdiobus);
1839 }
1840 
1841 static void lan78xx_link_status_change(struct net_device *net)
1842 {
1843 	struct phy_device *phydev = net->phydev;
1844 	int temp;
1845 
1846 	/* At forced 100 F/H mode, chip may fail to set mode correctly
1847 	 * when cable is switched between long(~50+m) and short one.
1848 	 * As workaround, set to 10 before setting to 100
1849 	 * at forced 100 F/H mode.
1850 	 */
1851 	if (!phydev->autoneg && (phydev->speed == 100)) {
1852 		/* disable phy interrupt */
1853 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1854 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1855 		phy_write(phydev, LAN88XX_INT_MASK, temp);
1856 
1857 		temp = phy_read(phydev, MII_BMCR);
1858 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1859 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1860 		temp |= BMCR_SPEED100;
1861 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1862 
1863 		/* clear pending interrupt generated while workaround */
1864 		temp = phy_read(phydev, LAN88XX_INT_STS);
1865 
1866 		/* enable phy interrupt back */
1867 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1868 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1869 		phy_write(phydev, LAN88XX_INT_MASK, temp);
1870 	}
1871 }
1872 
1873 static int irq_map(struct irq_domain *d, unsigned int irq,
1874 		   irq_hw_number_t hwirq)
1875 {
1876 	struct irq_domain_data *data = d->host_data;
1877 
1878 	irq_set_chip_data(irq, data);
1879 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1880 	irq_set_noprobe(irq);
1881 
1882 	return 0;
1883 }
1884 
1885 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1886 {
1887 	irq_set_chip_and_handler(irq, NULL, NULL);
1888 	irq_set_chip_data(irq, NULL);
1889 }
1890 
1891 static const struct irq_domain_ops chip_domain_ops = {
1892 	.map	= irq_map,
1893 	.unmap	= irq_unmap,
1894 };
1895 
1896 static void lan78xx_irq_mask(struct irq_data *irqd)
1897 {
1898 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1899 
1900 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1901 }
1902 
1903 static void lan78xx_irq_unmask(struct irq_data *irqd)
1904 {
1905 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1906 
1907 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1908 }
1909 
1910 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1911 {
1912 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1913 
1914 	mutex_lock(&data->irq_lock);
1915 }
1916 
1917 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1918 {
1919 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1920 	struct lan78xx_net *dev =
1921 			container_of(data, struct lan78xx_net, domain_data);
1922 	u32 buf;
1923 
1924 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
1925 	 * are only two callbacks executed in non-atomic contex.
1926 	 */
1927 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1928 	if (buf != data->irqenable)
1929 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1930 
1931 	mutex_unlock(&data->irq_lock);
1932 }
1933 
1934 static struct irq_chip lan78xx_irqchip = {
1935 	.name			= "lan78xx-irqs",
1936 	.irq_mask		= lan78xx_irq_mask,
1937 	.irq_unmask		= lan78xx_irq_unmask,
1938 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1939 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1940 };
1941 
1942 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1943 {
1944 	struct device_node *of_node;
1945 	struct irq_domain *irqdomain;
1946 	unsigned int irqmap = 0;
1947 	u32 buf;
1948 	int ret = 0;
1949 
1950 	of_node = dev->udev->dev.parent->of_node;
1951 
1952 	mutex_init(&dev->domain_data.irq_lock);
1953 
1954 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1955 	dev->domain_data.irqenable = buf;
1956 
1957 	dev->domain_data.irqchip = &lan78xx_irqchip;
1958 	dev->domain_data.irq_handler = handle_simple_irq;
1959 
1960 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1961 					  &chip_domain_ops, &dev->domain_data);
1962 	if (irqdomain) {
1963 		/* create mapping for PHY interrupt */
1964 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1965 		if (!irqmap) {
1966 			irq_domain_remove(irqdomain);
1967 
1968 			irqdomain = NULL;
1969 			ret = -EINVAL;
1970 		}
1971 	} else {
1972 		ret = -EINVAL;
1973 	}
1974 
1975 	dev->domain_data.irqdomain = irqdomain;
1976 	dev->domain_data.phyirq = irqmap;
1977 
1978 	return ret;
1979 }
1980 
1981 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1982 {
1983 	if (dev->domain_data.phyirq > 0) {
1984 		irq_dispose_mapping(dev->domain_data.phyirq);
1985 
1986 		if (dev->domain_data.irqdomain)
1987 			irq_domain_remove(dev->domain_data.irqdomain);
1988 	}
1989 	dev->domain_data.phyirq = 0;
1990 	dev->domain_data.irqdomain = NULL;
1991 }
1992 
1993 static int lan8835_fixup(struct phy_device *phydev)
1994 {
1995 	int buf;
1996 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1997 
1998 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1999 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2000 	buf &= ~0x1800;
2001 	buf |= 0x0800;
2002 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2003 
2004 	/* RGMII MAC TXC Delay Enable */
2005 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2006 				MAC_RGMII_ID_TXC_DELAY_EN_);
2007 
2008 	/* RGMII TX DLL Tune Adjust */
2009 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2010 
2011 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2012 
2013 	return 1;
2014 }
2015 
2016 static int ksz9031rnx_fixup(struct phy_device *phydev)
2017 {
2018 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2019 
2020 	/* Micrel9301RNX PHY configuration */
2021 	/* RGMII Control Signal Pad Skew */
2022 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2023 	/* RGMII RX Data Pad Skew */
2024 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2025 	/* RGMII RX Clock Pad Skew */
2026 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2027 
2028 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2029 
2030 	return 1;
2031 }
2032 
2033 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2034 {
2035 	u32 buf;
2036 	int ret;
2037 	struct fixed_phy_status fphy_status = {
2038 		.link = 1,
2039 		.speed = SPEED_1000,
2040 		.duplex = DUPLEX_FULL,
2041 	};
2042 	struct phy_device *phydev;
2043 
2044 	phydev = phy_find_first(dev->mdiobus);
2045 	if (!phydev) {
2046 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2047 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2048 		if (IS_ERR(phydev)) {
2049 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2050 			return NULL;
2051 		}
2052 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2053 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2054 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2055 					MAC_RGMII_ID_TXC_DELAY_EN_);
2056 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2057 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2058 		buf |= HW_CFG_CLK125_EN_;
2059 		buf |= HW_CFG_REFCLK25_EN_;
2060 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2061 	} else {
2062 		if (!phydev->drv) {
2063 			netdev_err(dev->net, "no PHY driver found\n");
2064 			return NULL;
2065 		}
2066 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2067 		/* external PHY fixup for KSZ9031RNX */
2068 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2069 						 ksz9031rnx_fixup);
2070 		if (ret < 0) {
2071 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2072 			return NULL;
2073 		}
2074 		/* external PHY fixup for LAN8835 */
2075 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2076 						 lan8835_fixup);
2077 		if (ret < 0) {
2078 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2079 			return NULL;
2080 		}
2081 		/* add more external PHY fixup here if needed */
2082 
2083 		phydev->is_internal = false;
2084 	}
2085 	return phydev;
2086 }
2087 
2088 static int lan78xx_phy_init(struct lan78xx_net *dev)
2089 {
2090 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2091 	int ret;
2092 	u32 mii_adv;
2093 	struct phy_device *phydev;
2094 
2095 	switch (dev->chipid) {
2096 	case ID_REV_CHIP_ID_7801_:
2097 		phydev = lan7801_phy_init(dev);
2098 		if (!phydev) {
2099 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2100 			return -EIO;
2101 		}
2102 		break;
2103 
2104 	case ID_REV_CHIP_ID_7800_:
2105 	case ID_REV_CHIP_ID_7850_:
2106 		phydev = phy_find_first(dev->mdiobus);
2107 		if (!phydev) {
2108 			netdev_err(dev->net, "no PHY found\n");
2109 			return -EIO;
2110 		}
2111 		phydev->is_internal = true;
2112 		dev->interface = PHY_INTERFACE_MODE_GMII;
2113 		break;
2114 
2115 	default:
2116 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2117 		return -EIO;
2118 	}
2119 
2120 	/* if phyirq is not set, use polling mode in phylib */
2121 	if (dev->domain_data.phyirq > 0)
2122 		phydev->irq = dev->domain_data.phyirq;
2123 	else
2124 		phydev->irq = 0;
2125 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2126 
2127 	/* set to AUTOMDIX */
2128 	phydev->mdix = ETH_TP_MDI_AUTO;
2129 
2130 	ret = phy_connect_direct(dev->net, phydev,
2131 				 lan78xx_link_status_change,
2132 				 dev->interface);
2133 	if (ret) {
2134 		netdev_err(dev->net, "can't attach PHY to %s\n",
2135 			   dev->mdiobus->id);
2136 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2137 			if (phy_is_pseudo_fixed_link(phydev)) {
2138 				fixed_phy_unregister(phydev);
2139 			} else {
2140 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2141 							     0xfffffff0);
2142 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2143 							     0xfffffff0);
2144 			}
2145 		}
2146 		return -EIO;
2147 	}
2148 
2149 	/* MAC doesn't support 1000T Half */
2150 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2151 
2152 	/* support both flow controls */
2153 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2154 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2155 			   phydev->advertising);
2156 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2157 			   phydev->advertising);
2158 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2159 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2160 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2161 
2162 	if (phydev->mdio.dev.of_node) {
2163 		u32 reg;
2164 		int len;
2165 
2166 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2167 						      "microchip,led-modes",
2168 						      sizeof(u32));
2169 		if (len >= 0) {
2170 			/* Ensure the appropriate LEDs are enabled */
2171 			lan78xx_read_reg(dev, HW_CFG, &reg);
2172 			reg &= ~(HW_CFG_LED0_EN_ |
2173 				 HW_CFG_LED1_EN_ |
2174 				 HW_CFG_LED2_EN_ |
2175 				 HW_CFG_LED3_EN_);
2176 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2177 				(len > 1) * HW_CFG_LED1_EN_ |
2178 				(len > 2) * HW_CFG_LED2_EN_ |
2179 				(len > 3) * HW_CFG_LED3_EN_;
2180 			lan78xx_write_reg(dev, HW_CFG, reg);
2181 		}
2182 	}
2183 
2184 	genphy_config_aneg(phydev);
2185 
2186 	dev->fc_autoneg = phydev->autoneg;
2187 
2188 	return 0;
2189 }
2190 
2191 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2192 {
2193 	u32 buf;
2194 	bool rxenabled;
2195 
2196 	lan78xx_read_reg(dev, MAC_RX, &buf);
2197 
2198 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2199 
2200 	if (rxenabled) {
2201 		buf &= ~MAC_RX_RXEN_;
2202 		lan78xx_write_reg(dev, MAC_RX, buf);
2203 	}
2204 
2205 	/* add 4 to size for FCS */
2206 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2207 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2208 
2209 	lan78xx_write_reg(dev, MAC_RX, buf);
2210 
2211 	if (rxenabled) {
2212 		buf |= MAC_RX_RXEN_;
2213 		lan78xx_write_reg(dev, MAC_RX, buf);
2214 	}
2215 
2216 	return 0;
2217 }
2218 
2219 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2220 {
2221 	struct sk_buff *skb;
2222 	unsigned long flags;
2223 	int count = 0;
2224 
2225 	spin_lock_irqsave(&q->lock, flags);
2226 	while (!skb_queue_empty(q)) {
2227 		struct skb_data	*entry;
2228 		struct urb *urb;
2229 		int ret;
2230 
2231 		skb_queue_walk(q, skb) {
2232 			entry = (struct skb_data *)skb->cb;
2233 			if (entry->state != unlink_start)
2234 				goto found;
2235 		}
2236 		break;
2237 found:
2238 		entry->state = unlink_start;
2239 		urb = entry->urb;
2240 
2241 		/* Get reference count of the URB to avoid it to be
2242 		 * freed during usb_unlink_urb, which may trigger
2243 		 * use-after-free problem inside usb_unlink_urb since
2244 		 * usb_unlink_urb is always racing with .complete
2245 		 * handler(include defer_bh).
2246 		 */
2247 		usb_get_urb(urb);
2248 		spin_unlock_irqrestore(&q->lock, flags);
2249 		/* during some PM-driven resume scenarios,
2250 		 * these (async) unlinks complete immediately
2251 		 */
2252 		ret = usb_unlink_urb(urb);
2253 		if (ret != -EINPROGRESS && ret != 0)
2254 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2255 		else
2256 			count++;
2257 		usb_put_urb(urb);
2258 		spin_lock_irqsave(&q->lock, flags);
2259 	}
2260 	spin_unlock_irqrestore(&q->lock, flags);
2261 	return count;
2262 }
2263 
2264 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2265 {
2266 	struct lan78xx_net *dev = netdev_priv(netdev);
2267 	int ll_mtu = new_mtu + netdev->hard_header_len;
2268 	int old_hard_mtu = dev->hard_mtu;
2269 	int old_rx_urb_size = dev->rx_urb_size;
2270 
2271 	/* no second zero-length packet read wanted after mtu-sized packets */
2272 	if ((ll_mtu % dev->maxpacket) == 0)
2273 		return -EDOM;
2274 
2275 	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2276 
2277 	netdev->mtu = new_mtu;
2278 
2279 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2280 	if (dev->rx_urb_size == old_hard_mtu) {
2281 		dev->rx_urb_size = dev->hard_mtu;
2282 		if (dev->rx_urb_size > old_rx_urb_size) {
2283 			if (netif_running(dev->net)) {
2284 				unlink_urbs(dev, &dev->rxq);
2285 				tasklet_schedule(&dev->bh);
2286 			}
2287 		}
2288 	}
2289 
2290 	return 0;
2291 }
2292 
2293 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2294 {
2295 	struct lan78xx_net *dev = netdev_priv(netdev);
2296 	struct sockaddr *addr = p;
2297 	u32 addr_lo, addr_hi;
2298 
2299 	if (netif_running(netdev))
2300 		return -EBUSY;
2301 
2302 	if (!is_valid_ether_addr(addr->sa_data))
2303 		return -EADDRNOTAVAIL;
2304 
2305 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2306 
2307 	addr_lo = netdev->dev_addr[0] |
2308 		  netdev->dev_addr[1] << 8 |
2309 		  netdev->dev_addr[2] << 16 |
2310 		  netdev->dev_addr[3] << 24;
2311 	addr_hi = netdev->dev_addr[4] |
2312 		  netdev->dev_addr[5] << 8;
2313 
2314 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2315 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2316 
2317 	/* Added to support MAC address changes */
2318 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2319 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2320 
2321 	return 0;
2322 }
2323 
2324 /* Enable or disable Rx checksum offload engine */
2325 static int lan78xx_set_features(struct net_device *netdev,
2326 				netdev_features_t features)
2327 {
2328 	struct lan78xx_net *dev = netdev_priv(netdev);
2329 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2330 	unsigned long flags;
2331 
2332 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2333 
2334 	if (features & NETIF_F_RXCSUM) {
2335 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2336 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2337 	} else {
2338 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2339 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2340 	}
2341 
2342 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2343 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2344 	else
2345 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2346 
2347 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2348 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2349 	else
2350 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2351 
2352 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2353 
2354 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2355 
2356 	return 0;
2357 }
2358 
2359 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2360 {
2361 	struct lan78xx_priv *pdata =
2362 			container_of(param, struct lan78xx_priv, set_vlan);
2363 	struct lan78xx_net *dev = pdata->dev;
2364 
2365 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2366 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2367 }
2368 
2369 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2370 				   __be16 proto, u16 vid)
2371 {
2372 	struct lan78xx_net *dev = netdev_priv(netdev);
2373 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2374 	u16 vid_bit_index;
2375 	u16 vid_dword_index;
2376 
2377 	vid_dword_index = (vid >> 5) & 0x7F;
2378 	vid_bit_index = vid & 0x1F;
2379 
2380 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2381 
2382 	/* defer register writes to a sleepable context */
2383 	schedule_work(&pdata->set_vlan);
2384 
2385 	return 0;
2386 }
2387 
2388 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2389 				    __be16 proto, u16 vid)
2390 {
2391 	struct lan78xx_net *dev = netdev_priv(netdev);
2392 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2393 	u16 vid_bit_index;
2394 	u16 vid_dword_index;
2395 
2396 	vid_dword_index = (vid >> 5) & 0x7F;
2397 	vid_bit_index = vid & 0x1F;
2398 
2399 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2400 
2401 	/* defer register writes to a sleepable context */
2402 	schedule_work(&pdata->set_vlan);
2403 
2404 	return 0;
2405 }
2406 
2407 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2408 {
2409 	int ret;
2410 	u32 buf;
2411 	u32 regs[6] = { 0 };
2412 
2413 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2414 	if (buf & USB_CFG1_LTM_ENABLE_) {
2415 		u8 temp[2];
2416 		/* Get values from EEPROM first */
2417 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2418 			if (temp[0] == 24) {
2419 				ret = lan78xx_read_raw_eeprom(dev,
2420 							      temp[1] * 2,
2421 							      24,
2422 							      (u8 *)regs);
2423 				if (ret < 0)
2424 					return;
2425 			}
2426 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2427 			if (temp[0] == 24) {
2428 				ret = lan78xx_read_raw_otp(dev,
2429 							   temp[1] * 2,
2430 							   24,
2431 							   (u8 *)regs);
2432 				if (ret < 0)
2433 					return;
2434 			}
2435 		}
2436 	}
2437 
2438 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2439 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2440 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2441 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2442 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2443 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2444 }
2445 
2446 static int lan78xx_reset(struct lan78xx_net *dev)
2447 {
2448 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2449 	u32 buf;
2450 	int ret = 0;
2451 	unsigned long timeout;
2452 	u8 sig;
2453 
2454 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2455 	buf |= HW_CFG_LRST_;
2456 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2457 
2458 	timeout = jiffies + HZ;
2459 	do {
2460 		mdelay(1);
2461 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2462 		if (time_after(jiffies, timeout)) {
2463 			netdev_warn(dev->net,
2464 				    "timeout on completion of LiteReset");
2465 			return -EIO;
2466 		}
2467 	} while (buf & HW_CFG_LRST_);
2468 
2469 	lan78xx_init_mac_address(dev);
2470 
2471 	/* save DEVID for later usage */
2472 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2473 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2474 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2475 
2476 	/* Respond to the IN token with a NAK */
2477 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2478 	buf |= USB_CFG_BIR_;
2479 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2480 
2481 	/* Init LTM */
2482 	lan78xx_init_ltm(dev);
2483 
2484 	if (dev->udev->speed == USB_SPEED_SUPER) {
2485 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2486 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2487 		dev->rx_qlen = 4;
2488 		dev->tx_qlen = 4;
2489 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2490 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2491 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2492 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2493 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2494 	} else {
2495 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2496 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2497 		dev->rx_qlen = 4;
2498 		dev->tx_qlen = 4;
2499 	}
2500 
2501 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2502 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2503 
2504 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2505 	buf |= HW_CFG_MEF_;
2506 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2507 
2508 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2509 	buf |= USB_CFG_BCE_;
2510 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2511 
2512 	/* set FIFO sizes */
2513 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2514 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2515 
2516 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2517 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2518 
2519 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2520 	ret = lan78xx_write_reg(dev, FLOW, 0);
2521 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2522 
2523 	/* Don't need rfe_ctl_lock during initialisation */
2524 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2525 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2526 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2527 
2528 	/* Enable or disable checksum offload engines */
2529 	lan78xx_set_features(dev->net, dev->net->features);
2530 
2531 	lan78xx_set_multicast(dev->net);
2532 
2533 	/* reset PHY */
2534 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2535 	buf |= PMT_CTL_PHY_RST_;
2536 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2537 
2538 	timeout = jiffies + HZ;
2539 	do {
2540 		mdelay(1);
2541 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2542 		if (time_after(jiffies, timeout)) {
2543 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2544 			return -EIO;
2545 		}
2546 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2547 
2548 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2549 	/* LAN7801 only has RGMII mode */
2550 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2551 		buf &= ~MAC_CR_GMII_EN_;
2552 
2553 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2554 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2555 		if (!ret && sig != EEPROM_INDICATOR) {
2556 			/* Implies there is no external eeprom. Set mac speed */
2557 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2558 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2559 		}
2560 	}
2561 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2562 
2563 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2564 	buf |= MAC_TX_TXEN_;
2565 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2566 
2567 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2568 	buf |= FCT_TX_CTL_EN_;
2569 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2570 
2571 	ret = lan78xx_set_rx_max_frame_length(dev,
2572 					      dev->net->mtu + VLAN_ETH_HLEN);
2573 
2574 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2575 	buf |= MAC_RX_RXEN_;
2576 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2577 
2578 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2579 	buf |= FCT_RX_CTL_EN_;
2580 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2581 
2582 	return 0;
2583 }
2584 
2585 static void lan78xx_init_stats(struct lan78xx_net *dev)
2586 {
2587 	u32 *p;
2588 	int i;
2589 
2590 	/* initialize for stats update
2591 	 * some counters are 20bits and some are 32bits
2592 	 */
2593 	p = (u32 *)&dev->stats.rollover_max;
2594 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2595 		p[i] = 0xFFFFF;
2596 
2597 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2598 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2599 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2600 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2601 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2602 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2603 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2604 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2605 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2606 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2607 
2608 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2609 }
2610 
2611 static int lan78xx_open(struct net_device *net)
2612 {
2613 	struct lan78xx_net *dev = netdev_priv(net);
2614 	int ret;
2615 
2616 	ret = usb_autopm_get_interface(dev->intf);
2617 	if (ret < 0)
2618 		goto out;
2619 
2620 	phy_start(net->phydev);
2621 
2622 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2623 
2624 	/* for Link Check */
2625 	if (dev->urb_intr) {
2626 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2627 		if (ret < 0) {
2628 			netif_err(dev, ifup, dev->net,
2629 				  "intr submit %d\n", ret);
2630 			goto done;
2631 		}
2632 	}
2633 
2634 	lan78xx_init_stats(dev);
2635 
2636 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2637 
2638 	netif_start_queue(net);
2639 
2640 	dev->link_on = false;
2641 
2642 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2643 done:
2644 	usb_autopm_put_interface(dev->intf);
2645 
2646 out:
2647 	return ret;
2648 }
2649 
2650 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2651 {
2652 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2653 	DECLARE_WAITQUEUE(wait, current);
2654 	int temp;
2655 
2656 	/* ensure there are no more active urbs */
2657 	add_wait_queue(&unlink_wakeup, &wait);
2658 	set_current_state(TASK_UNINTERRUPTIBLE);
2659 	dev->wait = &unlink_wakeup;
2660 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2661 
2662 	/* maybe wait for deletions to finish. */
2663 	while (!skb_queue_empty(&dev->rxq) &&
2664 	       !skb_queue_empty(&dev->txq) &&
2665 	       !skb_queue_empty(&dev->done)) {
2666 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2667 		set_current_state(TASK_UNINTERRUPTIBLE);
2668 		netif_dbg(dev, ifdown, dev->net,
2669 			  "waited for %d urb completions\n", temp);
2670 	}
2671 	set_current_state(TASK_RUNNING);
2672 	dev->wait = NULL;
2673 	remove_wait_queue(&unlink_wakeup, &wait);
2674 }
2675 
2676 static int lan78xx_stop(struct net_device *net)
2677 {
2678 	struct lan78xx_net *dev = netdev_priv(net);
2679 
2680 	if (timer_pending(&dev->stat_monitor))
2681 		del_timer_sync(&dev->stat_monitor);
2682 
2683 	if (net->phydev)
2684 		phy_stop(net->phydev);
2685 
2686 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2687 	netif_stop_queue(net);
2688 
2689 	netif_info(dev, ifdown, dev->net,
2690 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2691 		   net->stats.rx_packets, net->stats.tx_packets,
2692 		   net->stats.rx_errors, net->stats.tx_errors);
2693 
2694 	lan78xx_terminate_urbs(dev);
2695 
2696 	usb_kill_urb(dev->urb_intr);
2697 
2698 	skb_queue_purge(&dev->rxq_pause);
2699 
2700 	/* deferred work (task, timer, softirq) must also stop.
2701 	 * can't flush_scheduled_work() until we drop rtnl (later),
2702 	 * else workers could deadlock; so make workers a NOP.
2703 	 */
2704 	dev->flags = 0;
2705 	cancel_delayed_work_sync(&dev->wq);
2706 	tasklet_kill(&dev->bh);
2707 
2708 	usb_autopm_put_interface(dev->intf);
2709 
2710 	return 0;
2711 }
2712 
2713 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2714 				       struct sk_buff *skb, gfp_t flags)
2715 {
2716 	u32 tx_cmd_a, tx_cmd_b;
2717 	void *ptr;
2718 
2719 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2720 		dev_kfree_skb_any(skb);
2721 		return NULL;
2722 	}
2723 
2724 	if (skb_linearize(skb)) {
2725 		dev_kfree_skb_any(skb);
2726 		return NULL;
2727 	}
2728 
2729 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2730 
2731 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2732 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2733 
2734 	tx_cmd_b = 0;
2735 	if (skb_is_gso(skb)) {
2736 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2737 
2738 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2739 
2740 		tx_cmd_a |= TX_CMD_A_LSO_;
2741 	}
2742 
2743 	if (skb_vlan_tag_present(skb)) {
2744 		tx_cmd_a |= TX_CMD_A_IVTG_;
2745 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2746 	}
2747 
2748 	ptr = skb_push(skb, 8);
2749 	put_unaligned_le32(tx_cmd_a, ptr);
2750 	put_unaligned_le32(tx_cmd_b, ptr + 4);
2751 
2752 	return skb;
2753 }
2754 
2755 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2756 			       struct sk_buff_head *list, enum skb_state state)
2757 {
2758 	unsigned long flags;
2759 	enum skb_state old_state;
2760 	struct skb_data *entry = (struct skb_data *)skb->cb;
2761 
2762 	spin_lock_irqsave(&list->lock, flags);
2763 	old_state = entry->state;
2764 	entry->state = state;
2765 
2766 	__skb_unlink(skb, list);
2767 	spin_unlock(&list->lock);
2768 	spin_lock(&dev->done.lock);
2769 
2770 	__skb_queue_tail(&dev->done, skb);
2771 	if (skb_queue_len(&dev->done) == 1)
2772 		tasklet_schedule(&dev->bh);
2773 	spin_unlock_irqrestore(&dev->done.lock, flags);
2774 
2775 	return old_state;
2776 }
2777 
2778 static void tx_complete(struct urb *urb)
2779 {
2780 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2781 	struct skb_data *entry = (struct skb_data *)skb->cb;
2782 	struct lan78xx_net *dev = entry->dev;
2783 
2784 	if (urb->status == 0) {
2785 		dev->net->stats.tx_packets += entry->num_of_packet;
2786 		dev->net->stats.tx_bytes += entry->length;
2787 	} else {
2788 		dev->net->stats.tx_errors++;
2789 
2790 		switch (urb->status) {
2791 		case -EPIPE:
2792 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2793 			break;
2794 
2795 		/* software-driven interface shutdown */
2796 		case -ECONNRESET:
2797 		case -ESHUTDOWN:
2798 			break;
2799 
2800 		case -EPROTO:
2801 		case -ETIME:
2802 		case -EILSEQ:
2803 			netif_stop_queue(dev->net);
2804 			break;
2805 		default:
2806 			netif_dbg(dev, tx_err, dev->net,
2807 				  "tx err %d\n", entry->urb->status);
2808 			break;
2809 		}
2810 	}
2811 
2812 	usb_autopm_put_interface_async(dev->intf);
2813 
2814 	defer_bh(dev, skb, &dev->txq, tx_done);
2815 }
2816 
2817 static void lan78xx_queue_skb(struct sk_buff_head *list,
2818 			      struct sk_buff *newsk, enum skb_state state)
2819 {
2820 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2821 
2822 	__skb_queue_tail(list, newsk);
2823 	entry->state = state;
2824 }
2825 
2826 static netdev_tx_t
2827 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2828 {
2829 	struct lan78xx_net *dev = netdev_priv(net);
2830 	struct sk_buff *skb2 = NULL;
2831 
2832 	if (skb) {
2833 		skb_tx_timestamp(skb);
2834 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2835 	}
2836 
2837 	if (skb2) {
2838 		skb_queue_tail(&dev->txq_pend, skb2);
2839 
2840 		/* throttle TX patch at slower than SUPER SPEED USB */
2841 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2842 		    (skb_queue_len(&dev->txq_pend) > 10))
2843 			netif_stop_queue(net);
2844 	} else {
2845 		netif_dbg(dev, tx_err, dev->net,
2846 			  "lan78xx_tx_prep return NULL\n");
2847 		dev->net->stats.tx_errors++;
2848 		dev->net->stats.tx_dropped++;
2849 	}
2850 
2851 	tasklet_schedule(&dev->bh);
2852 
2853 	return NETDEV_TX_OK;
2854 }
2855 
2856 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2857 {
2858 	struct lan78xx_priv *pdata = NULL;
2859 	int ret;
2860 	int i;
2861 
2862 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2863 
2864 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2865 	if (!pdata) {
2866 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2867 		return -ENOMEM;
2868 	}
2869 
2870 	pdata->dev = dev;
2871 
2872 	spin_lock_init(&pdata->rfe_ctl_lock);
2873 	mutex_init(&pdata->dataport_mutex);
2874 
2875 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2876 
2877 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2878 		pdata->vlan_table[i] = 0;
2879 
2880 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2881 
2882 	dev->net->features = 0;
2883 
2884 	if (DEFAULT_TX_CSUM_ENABLE)
2885 		dev->net->features |= NETIF_F_HW_CSUM;
2886 
2887 	if (DEFAULT_RX_CSUM_ENABLE)
2888 		dev->net->features |= NETIF_F_RXCSUM;
2889 
2890 	if (DEFAULT_TSO_CSUM_ENABLE)
2891 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2892 
2893 	if (DEFAULT_VLAN_RX_OFFLOAD)
2894 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2895 
2896 	if (DEFAULT_VLAN_FILTER_ENABLE)
2897 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2898 
2899 	dev->net->hw_features = dev->net->features;
2900 
2901 	ret = lan78xx_setup_irq_domain(dev);
2902 	if (ret < 0) {
2903 		netdev_warn(dev->net,
2904 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2905 		goto out1;
2906 	}
2907 
2908 	dev->net->hard_header_len += TX_OVERHEAD;
2909 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2910 
2911 	/* Init all registers */
2912 	ret = lan78xx_reset(dev);
2913 	if (ret) {
2914 		netdev_warn(dev->net, "Registers INIT FAILED....");
2915 		goto out2;
2916 	}
2917 
2918 	ret = lan78xx_mdio_init(dev);
2919 	if (ret) {
2920 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
2921 		goto out2;
2922 	}
2923 
2924 	dev->net->flags |= IFF_MULTICAST;
2925 
2926 	pdata->wol = WAKE_MAGIC;
2927 
2928 	return ret;
2929 
2930 out2:
2931 	lan78xx_remove_irq_domain(dev);
2932 
2933 out1:
2934 	netdev_warn(dev->net, "Bind routine FAILED");
2935 	cancel_work_sync(&pdata->set_multicast);
2936 	cancel_work_sync(&pdata->set_vlan);
2937 	kfree(pdata);
2938 	return ret;
2939 }
2940 
2941 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2942 {
2943 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2944 
2945 	lan78xx_remove_irq_domain(dev);
2946 
2947 	lan78xx_remove_mdio(dev);
2948 
2949 	if (pdata) {
2950 		cancel_work_sync(&pdata->set_multicast);
2951 		cancel_work_sync(&pdata->set_vlan);
2952 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2953 		kfree(pdata);
2954 		pdata = NULL;
2955 		dev->data[0] = 0;
2956 	}
2957 }
2958 
2959 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2960 				    struct sk_buff *skb,
2961 				    u32 rx_cmd_a, u32 rx_cmd_b)
2962 {
2963 	/* HW Checksum offload appears to be flawed if used when not stripping
2964 	 * VLAN headers. Drop back to S/W checksums under these conditions.
2965 	 */
2966 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2967 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
2968 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
2969 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
2970 		skb->ip_summed = CHECKSUM_NONE;
2971 	} else {
2972 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2973 		skb->ip_summed = CHECKSUM_COMPLETE;
2974 	}
2975 }
2976 
2977 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
2978 				    struct sk_buff *skb,
2979 				    u32 rx_cmd_a, u32 rx_cmd_b)
2980 {
2981 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2982 	    (rx_cmd_a & RX_CMD_A_FVTG_))
2983 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2984 				       (rx_cmd_b & 0xffff));
2985 }
2986 
2987 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2988 {
2989 	int status;
2990 
2991 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2992 		skb_queue_tail(&dev->rxq_pause, skb);
2993 		return;
2994 	}
2995 
2996 	dev->net->stats.rx_packets++;
2997 	dev->net->stats.rx_bytes += skb->len;
2998 
2999 	skb->protocol = eth_type_trans(skb, dev->net);
3000 
3001 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3002 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3003 	memset(skb->cb, 0, sizeof(struct skb_data));
3004 
3005 	if (skb_defer_rx_timestamp(skb))
3006 		return;
3007 
3008 	status = netif_rx(skb);
3009 	if (status != NET_RX_SUCCESS)
3010 		netif_dbg(dev, rx_err, dev->net,
3011 			  "netif_rx status %d\n", status);
3012 }
3013 
3014 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3015 {
3016 	if (skb->len < dev->net->hard_header_len)
3017 		return 0;
3018 
3019 	while (skb->len > 0) {
3020 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3021 		u16 rx_cmd_c;
3022 		struct sk_buff *skb2;
3023 		unsigned char *packet;
3024 
3025 		rx_cmd_a = get_unaligned_le32(skb->data);
3026 		skb_pull(skb, sizeof(rx_cmd_a));
3027 
3028 		rx_cmd_b = get_unaligned_le32(skb->data);
3029 		skb_pull(skb, sizeof(rx_cmd_b));
3030 
3031 		rx_cmd_c = get_unaligned_le16(skb->data);
3032 		skb_pull(skb, sizeof(rx_cmd_c));
3033 
3034 		packet = skb->data;
3035 
3036 		/* get the packet length */
3037 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3038 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3039 
3040 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3041 			netif_dbg(dev, rx_err, dev->net,
3042 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3043 		} else {
3044 			/* last frame in this batch */
3045 			if (skb->len == size) {
3046 				lan78xx_rx_csum_offload(dev, skb,
3047 							rx_cmd_a, rx_cmd_b);
3048 				lan78xx_rx_vlan_offload(dev, skb,
3049 							rx_cmd_a, rx_cmd_b);
3050 
3051 				skb_trim(skb, skb->len - 4); /* remove fcs */
3052 				skb->truesize = size + sizeof(struct sk_buff);
3053 
3054 				return 1;
3055 			}
3056 
3057 			skb2 = skb_clone(skb, GFP_ATOMIC);
3058 			if (unlikely(!skb2)) {
3059 				netdev_warn(dev->net, "Error allocating skb");
3060 				return 0;
3061 			}
3062 
3063 			skb2->len = size;
3064 			skb2->data = packet;
3065 			skb_set_tail_pointer(skb2, size);
3066 
3067 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3068 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3069 
3070 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3071 			skb2->truesize = size + sizeof(struct sk_buff);
3072 
3073 			lan78xx_skb_return(dev, skb2);
3074 		}
3075 
3076 		skb_pull(skb, size);
3077 
3078 		/* padding bytes before the next frame starts */
3079 		if (skb->len)
3080 			skb_pull(skb, align_count);
3081 	}
3082 
3083 	return 1;
3084 }
3085 
3086 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3087 {
3088 	if (!lan78xx_rx(dev, skb)) {
3089 		dev->net->stats.rx_errors++;
3090 		goto done;
3091 	}
3092 
3093 	if (skb->len) {
3094 		lan78xx_skb_return(dev, skb);
3095 		return;
3096 	}
3097 
3098 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3099 	dev->net->stats.rx_errors++;
3100 done:
3101 	skb_queue_tail(&dev->done, skb);
3102 }
3103 
3104 static void rx_complete(struct urb *urb);
3105 
3106 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3107 {
3108 	struct sk_buff *skb;
3109 	struct skb_data *entry;
3110 	unsigned long lockflags;
3111 	size_t size = dev->rx_urb_size;
3112 	int ret = 0;
3113 
3114 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3115 	if (!skb) {
3116 		usb_free_urb(urb);
3117 		return -ENOMEM;
3118 	}
3119 
3120 	entry = (struct skb_data *)skb->cb;
3121 	entry->urb = urb;
3122 	entry->dev = dev;
3123 	entry->length = 0;
3124 
3125 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3126 			  skb->data, size, rx_complete, skb);
3127 
3128 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3129 
3130 	if (netif_device_present(dev->net) &&
3131 	    netif_running(dev->net) &&
3132 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3133 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3134 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3135 		switch (ret) {
3136 		case 0:
3137 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3138 			break;
3139 		case -EPIPE:
3140 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3141 			break;
3142 		case -ENODEV:
3143 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3144 			netif_device_detach(dev->net);
3145 			break;
3146 		case -EHOSTUNREACH:
3147 			ret = -ENOLINK;
3148 			break;
3149 		default:
3150 			netif_dbg(dev, rx_err, dev->net,
3151 				  "rx submit, %d\n", ret);
3152 			tasklet_schedule(&dev->bh);
3153 		}
3154 	} else {
3155 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3156 		ret = -ENOLINK;
3157 	}
3158 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3159 	if (ret) {
3160 		dev_kfree_skb_any(skb);
3161 		usb_free_urb(urb);
3162 	}
3163 	return ret;
3164 }
3165 
3166 static void rx_complete(struct urb *urb)
3167 {
3168 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3169 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3170 	struct lan78xx_net *dev = entry->dev;
3171 	int urb_status = urb->status;
3172 	enum skb_state state;
3173 
3174 	skb_put(skb, urb->actual_length);
3175 	state = rx_done;
3176 	entry->urb = NULL;
3177 
3178 	switch (urb_status) {
3179 	case 0:
3180 		if (skb->len < dev->net->hard_header_len) {
3181 			state = rx_cleanup;
3182 			dev->net->stats.rx_errors++;
3183 			dev->net->stats.rx_length_errors++;
3184 			netif_dbg(dev, rx_err, dev->net,
3185 				  "rx length %d\n", skb->len);
3186 		}
3187 		usb_mark_last_busy(dev->udev);
3188 		break;
3189 	case -EPIPE:
3190 		dev->net->stats.rx_errors++;
3191 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3192 		fallthrough;
3193 	case -ECONNRESET:				/* async unlink */
3194 	case -ESHUTDOWN:				/* hardware gone */
3195 		netif_dbg(dev, ifdown, dev->net,
3196 			  "rx shutdown, code %d\n", urb_status);
3197 		state = rx_cleanup;
3198 		entry->urb = urb;
3199 		urb = NULL;
3200 		break;
3201 	case -EPROTO:
3202 	case -ETIME:
3203 	case -EILSEQ:
3204 		dev->net->stats.rx_errors++;
3205 		state = rx_cleanup;
3206 		entry->urb = urb;
3207 		urb = NULL;
3208 		break;
3209 
3210 	/* data overrun ... flush fifo? */
3211 	case -EOVERFLOW:
3212 		dev->net->stats.rx_over_errors++;
3213 		fallthrough;
3214 
3215 	default:
3216 		state = rx_cleanup;
3217 		dev->net->stats.rx_errors++;
3218 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3219 		break;
3220 	}
3221 
3222 	state = defer_bh(dev, skb, &dev->rxq, state);
3223 
3224 	if (urb) {
3225 		if (netif_running(dev->net) &&
3226 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3227 		    state != unlink_start) {
3228 			rx_submit(dev, urb, GFP_ATOMIC);
3229 			return;
3230 		}
3231 		usb_free_urb(urb);
3232 	}
3233 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3234 }
3235 
3236 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3237 {
3238 	int length;
3239 	struct urb *urb = NULL;
3240 	struct skb_data *entry;
3241 	unsigned long flags;
3242 	struct sk_buff_head *tqp = &dev->txq_pend;
3243 	struct sk_buff *skb, *skb2;
3244 	int ret;
3245 	int count, pos;
3246 	int skb_totallen, pkt_cnt;
3247 
3248 	skb_totallen = 0;
3249 	pkt_cnt = 0;
3250 	count = 0;
3251 	length = 0;
3252 	spin_lock_irqsave(&tqp->lock, flags);
3253 	skb_queue_walk(tqp, skb) {
3254 		if (skb_is_gso(skb)) {
3255 			if (!skb_queue_is_first(tqp, skb)) {
3256 				/* handle previous packets first */
3257 				break;
3258 			}
3259 			count = 1;
3260 			length = skb->len - TX_OVERHEAD;
3261 			__skb_unlink(skb, tqp);
3262 			spin_unlock_irqrestore(&tqp->lock, flags);
3263 			goto gso_skb;
3264 		}
3265 
3266 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3267 			break;
3268 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3269 		pkt_cnt++;
3270 	}
3271 	spin_unlock_irqrestore(&tqp->lock, flags);
3272 
3273 	/* copy to a single skb */
3274 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3275 	if (!skb)
3276 		goto drop;
3277 
3278 	skb_put(skb, skb_totallen);
3279 
3280 	for (count = pos = 0; count < pkt_cnt; count++) {
3281 		skb2 = skb_dequeue(tqp);
3282 		if (skb2) {
3283 			length += (skb2->len - TX_OVERHEAD);
3284 			memcpy(skb->data + pos, skb2->data, skb2->len);
3285 			pos += roundup(skb2->len, sizeof(u32));
3286 			dev_kfree_skb(skb2);
3287 		}
3288 	}
3289 
3290 gso_skb:
3291 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3292 	if (!urb)
3293 		goto drop;
3294 
3295 	entry = (struct skb_data *)skb->cb;
3296 	entry->urb = urb;
3297 	entry->dev = dev;
3298 	entry->length = length;
3299 	entry->num_of_packet = count;
3300 
3301 	spin_lock_irqsave(&dev->txq.lock, flags);
3302 	ret = usb_autopm_get_interface_async(dev->intf);
3303 	if (ret < 0) {
3304 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3305 		goto drop;
3306 	}
3307 
3308 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3309 			  skb->data, skb->len, tx_complete, skb);
3310 
3311 	if (length % dev->maxpacket == 0) {
3312 		/* send USB_ZERO_PACKET */
3313 		urb->transfer_flags |= URB_ZERO_PACKET;
3314 	}
3315 
3316 #ifdef CONFIG_PM
3317 	/* if this triggers the device is still a sleep */
3318 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3319 		/* transmission will be done in resume */
3320 		usb_anchor_urb(urb, &dev->deferred);
3321 		/* no use to process more packets */
3322 		netif_stop_queue(dev->net);
3323 		usb_put_urb(urb);
3324 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3325 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3326 		return;
3327 	}
3328 #endif
3329 
3330 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3331 	switch (ret) {
3332 	case 0:
3333 		netif_trans_update(dev->net);
3334 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3335 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3336 			netif_stop_queue(dev->net);
3337 		break;
3338 	case -EPIPE:
3339 		netif_stop_queue(dev->net);
3340 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3341 		usb_autopm_put_interface_async(dev->intf);
3342 		break;
3343 	default:
3344 		usb_autopm_put_interface_async(dev->intf);
3345 		netif_dbg(dev, tx_err, dev->net,
3346 			  "tx: submit urb err %d\n", ret);
3347 		break;
3348 	}
3349 
3350 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3351 
3352 	if (ret) {
3353 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3354 drop:
3355 		dev->net->stats.tx_dropped++;
3356 		if (skb)
3357 			dev_kfree_skb_any(skb);
3358 		usb_free_urb(urb);
3359 	} else
3360 		netif_dbg(dev, tx_queued, dev->net,
3361 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3362 }
3363 
3364 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3365 {
3366 	struct urb *urb;
3367 	int i;
3368 
3369 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3370 		for (i = 0; i < 10; i++) {
3371 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3372 				break;
3373 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3374 			if (urb)
3375 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3376 					return;
3377 		}
3378 
3379 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3380 			tasklet_schedule(&dev->bh);
3381 	}
3382 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3383 		netif_wake_queue(dev->net);
3384 }
3385 
3386 static void lan78xx_bh(struct tasklet_struct *t)
3387 {
3388 	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
3389 	struct sk_buff *skb;
3390 	struct skb_data *entry;
3391 
3392 	while ((skb = skb_dequeue(&dev->done))) {
3393 		entry = (struct skb_data *)(skb->cb);
3394 		switch (entry->state) {
3395 		case rx_done:
3396 			entry->state = rx_cleanup;
3397 			rx_process(dev, skb);
3398 			continue;
3399 		case tx_done:
3400 			usb_free_urb(entry->urb);
3401 			dev_kfree_skb(skb);
3402 			continue;
3403 		case rx_cleanup:
3404 			usb_free_urb(entry->urb);
3405 			dev_kfree_skb(skb);
3406 			continue;
3407 		default:
3408 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3409 			return;
3410 		}
3411 	}
3412 
3413 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3414 		/* reset update timer delta */
3415 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3416 			dev->delta = 1;
3417 			mod_timer(&dev->stat_monitor,
3418 				  jiffies + STAT_UPDATE_TIMER);
3419 		}
3420 
3421 		if (!skb_queue_empty(&dev->txq_pend))
3422 			lan78xx_tx_bh(dev);
3423 
3424 		if (!timer_pending(&dev->delay) &&
3425 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3426 			lan78xx_rx_bh(dev);
3427 	}
3428 }
3429 
3430 static void lan78xx_delayedwork(struct work_struct *work)
3431 {
3432 	int status;
3433 	struct lan78xx_net *dev;
3434 
3435 	dev = container_of(work, struct lan78xx_net, wq.work);
3436 
3437 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3438 		unlink_urbs(dev, &dev->txq);
3439 		status = usb_autopm_get_interface(dev->intf);
3440 		if (status < 0)
3441 			goto fail_pipe;
3442 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3443 		usb_autopm_put_interface(dev->intf);
3444 		if (status < 0 &&
3445 		    status != -EPIPE &&
3446 		    status != -ESHUTDOWN) {
3447 			if (netif_msg_tx_err(dev))
3448 fail_pipe:
3449 				netdev_err(dev->net,
3450 					   "can't clear tx halt, status %d\n",
3451 					   status);
3452 		} else {
3453 			clear_bit(EVENT_TX_HALT, &dev->flags);
3454 			if (status != -ESHUTDOWN)
3455 				netif_wake_queue(dev->net);
3456 		}
3457 	}
3458 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3459 		unlink_urbs(dev, &dev->rxq);
3460 		status = usb_autopm_get_interface(dev->intf);
3461 		if (status < 0)
3462 				goto fail_halt;
3463 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3464 		usb_autopm_put_interface(dev->intf);
3465 		if (status < 0 &&
3466 		    status != -EPIPE &&
3467 		    status != -ESHUTDOWN) {
3468 			if (netif_msg_rx_err(dev))
3469 fail_halt:
3470 				netdev_err(dev->net,
3471 					   "can't clear rx halt, status %d\n",
3472 					   status);
3473 		} else {
3474 			clear_bit(EVENT_RX_HALT, &dev->flags);
3475 			tasklet_schedule(&dev->bh);
3476 		}
3477 	}
3478 
3479 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3480 		int ret = 0;
3481 
3482 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3483 		status = usb_autopm_get_interface(dev->intf);
3484 		if (status < 0)
3485 			goto skip_reset;
3486 		if (lan78xx_link_reset(dev) < 0) {
3487 			usb_autopm_put_interface(dev->intf);
3488 skip_reset:
3489 			netdev_info(dev->net, "link reset failed (%d)\n",
3490 				    ret);
3491 		} else {
3492 			usb_autopm_put_interface(dev->intf);
3493 		}
3494 	}
3495 
3496 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3497 		lan78xx_update_stats(dev);
3498 
3499 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3500 
3501 		mod_timer(&dev->stat_monitor,
3502 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3503 
3504 		dev->delta = min((dev->delta * 2), 50);
3505 	}
3506 }
3507 
3508 static void intr_complete(struct urb *urb)
3509 {
3510 	struct lan78xx_net *dev = urb->context;
3511 	int status = urb->status;
3512 
3513 	switch (status) {
3514 	/* success */
3515 	case 0:
3516 		lan78xx_status(dev, urb);
3517 		break;
3518 
3519 	/* software-driven interface shutdown */
3520 	case -ENOENT:			/* urb killed */
3521 	case -ESHUTDOWN:		/* hardware gone */
3522 		netif_dbg(dev, ifdown, dev->net,
3523 			  "intr shutdown, code %d\n", status);
3524 		return;
3525 
3526 	/* NOTE:  not throttling like RX/TX, since this endpoint
3527 	 * already polls infrequently
3528 	 */
3529 	default:
3530 		netdev_dbg(dev->net, "intr status %d\n", status);
3531 		break;
3532 	}
3533 
3534 	if (!netif_running(dev->net))
3535 		return;
3536 
3537 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3538 	status = usb_submit_urb(urb, GFP_ATOMIC);
3539 	if (status != 0)
3540 		netif_err(dev, timer, dev->net,
3541 			  "intr resubmit --> %d\n", status);
3542 }
3543 
3544 static void lan78xx_disconnect(struct usb_interface *intf)
3545 {
3546 	struct lan78xx_net *dev;
3547 	struct usb_device *udev;
3548 	struct net_device *net;
3549 	struct phy_device *phydev;
3550 
3551 	dev = usb_get_intfdata(intf);
3552 	usb_set_intfdata(intf, NULL);
3553 	if (!dev)
3554 		return;
3555 
3556 	udev = interface_to_usbdev(intf);
3557 	net = dev->net;
3558 	phydev = net->phydev;
3559 
3560 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3561 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3562 
3563 	phy_disconnect(net->phydev);
3564 
3565 	if (phy_is_pseudo_fixed_link(phydev))
3566 		fixed_phy_unregister(phydev);
3567 
3568 	unregister_netdev(net);
3569 
3570 	cancel_delayed_work_sync(&dev->wq);
3571 
3572 	usb_scuttle_anchored_urbs(&dev->deferred);
3573 
3574 	lan78xx_unbind(dev, intf);
3575 
3576 	usb_kill_urb(dev->urb_intr);
3577 	usb_free_urb(dev->urb_intr);
3578 
3579 	free_netdev(net);
3580 	usb_put_dev(udev);
3581 }
3582 
3583 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3584 {
3585 	struct lan78xx_net *dev = netdev_priv(net);
3586 
3587 	unlink_urbs(dev, &dev->txq);
3588 	tasklet_schedule(&dev->bh);
3589 }
3590 
3591 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3592 						struct net_device *netdev,
3593 						netdev_features_t features)
3594 {
3595 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3596 		features &= ~NETIF_F_GSO_MASK;
3597 
3598 	features = vlan_features_check(skb, features);
3599 	features = vxlan_features_check(skb, features);
3600 
3601 	return features;
3602 }
3603 
3604 static const struct net_device_ops lan78xx_netdev_ops = {
3605 	.ndo_open		= lan78xx_open,
3606 	.ndo_stop		= lan78xx_stop,
3607 	.ndo_start_xmit		= lan78xx_start_xmit,
3608 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3609 	.ndo_change_mtu		= lan78xx_change_mtu,
3610 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3611 	.ndo_validate_addr	= eth_validate_addr,
3612 	.ndo_do_ioctl		= phy_do_ioctl_running,
3613 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3614 	.ndo_set_features	= lan78xx_set_features,
3615 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3616 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3617 	.ndo_features_check	= lan78xx_features_check,
3618 };
3619 
3620 static void lan78xx_stat_monitor(struct timer_list *t)
3621 {
3622 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3623 
3624 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3625 }
3626 
3627 static int lan78xx_probe(struct usb_interface *intf,
3628 			 const struct usb_device_id *id)
3629 {
3630 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3631 	struct lan78xx_net *dev;
3632 	struct net_device *netdev;
3633 	struct usb_device *udev;
3634 	int ret;
3635 	unsigned maxp;
3636 	unsigned period;
3637 	u8 *buf = NULL;
3638 
3639 	udev = interface_to_usbdev(intf);
3640 	udev = usb_get_dev(udev);
3641 
3642 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3643 	if (!netdev) {
3644 		dev_err(&intf->dev, "Error: OOM\n");
3645 		ret = -ENOMEM;
3646 		goto out1;
3647 	}
3648 
3649 	/* netdev_printk() needs this */
3650 	SET_NETDEV_DEV(netdev, &intf->dev);
3651 
3652 	dev = netdev_priv(netdev);
3653 	dev->udev = udev;
3654 	dev->intf = intf;
3655 	dev->net = netdev;
3656 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3657 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3658 
3659 	skb_queue_head_init(&dev->rxq);
3660 	skb_queue_head_init(&dev->txq);
3661 	skb_queue_head_init(&dev->done);
3662 	skb_queue_head_init(&dev->rxq_pause);
3663 	skb_queue_head_init(&dev->txq_pend);
3664 	mutex_init(&dev->phy_mutex);
3665 
3666 	tasklet_setup(&dev->bh, lan78xx_bh);
3667 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3668 	init_usb_anchor(&dev->deferred);
3669 
3670 	netdev->netdev_ops = &lan78xx_netdev_ops;
3671 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3672 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3673 
3674 	dev->delta = 1;
3675 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3676 
3677 	mutex_init(&dev->stats.access_lock);
3678 
3679 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3680 		ret = -ENODEV;
3681 		goto out2;
3682 	}
3683 
3684 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3685 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3686 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3687 		ret = -ENODEV;
3688 		goto out2;
3689 	}
3690 
3691 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3692 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3693 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3694 		ret = -ENODEV;
3695 		goto out2;
3696 	}
3697 
3698 	ep_intr = &intf->cur_altsetting->endpoint[2];
3699 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3700 		ret = -ENODEV;
3701 		goto out2;
3702 	}
3703 
3704 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3705 					usb_endpoint_num(&ep_intr->desc));
3706 
3707 	ret = lan78xx_bind(dev, intf);
3708 	if (ret < 0)
3709 		goto out2;
3710 
3711 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3712 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3713 
3714 	/* MTU range: 68 - 9000 */
3715 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3716 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3717 
3718 	period = ep_intr->desc.bInterval;
3719 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3720 	buf = kmalloc(maxp, GFP_KERNEL);
3721 	if (buf) {
3722 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3723 		if (!dev->urb_intr) {
3724 			ret = -ENOMEM;
3725 			kfree(buf);
3726 			goto out3;
3727 		} else {
3728 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3729 					 dev->pipe_intr, buf, maxp,
3730 					 intr_complete, dev, period);
3731 			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3732 		}
3733 	}
3734 
3735 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3736 
3737 	/* driver requires remote-wakeup capability during autosuspend. */
3738 	intf->needs_remote_wakeup = 1;
3739 
3740 	ret = lan78xx_phy_init(dev);
3741 	if (ret < 0)
3742 		goto out4;
3743 
3744 	ret = register_netdev(netdev);
3745 	if (ret != 0) {
3746 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3747 		goto out5;
3748 	}
3749 
3750 	usb_set_intfdata(intf, dev);
3751 
3752 	ret = device_set_wakeup_enable(&udev->dev, true);
3753 
3754 	 /* Default delay of 2sec has more overhead than advantage.
3755 	  * Set to 10sec as default.
3756 	  */
3757 	pm_runtime_set_autosuspend_delay(&udev->dev,
3758 					 DEFAULT_AUTOSUSPEND_DELAY);
3759 
3760 	return 0;
3761 
3762 out5:
3763 	phy_disconnect(netdev->phydev);
3764 out4:
3765 	usb_free_urb(dev->urb_intr);
3766 out3:
3767 	lan78xx_unbind(dev, intf);
3768 out2:
3769 	free_netdev(netdev);
3770 out1:
3771 	usb_put_dev(udev);
3772 
3773 	return ret;
3774 }
3775 
3776 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3777 {
3778 	const u16 crc16poly = 0x8005;
3779 	int i;
3780 	u16 bit, crc, msb;
3781 	u8 data;
3782 
3783 	crc = 0xFFFF;
3784 	for (i = 0; i < len; i++) {
3785 		data = *buf++;
3786 		for (bit = 0; bit < 8; bit++) {
3787 			msb = crc >> 15;
3788 			crc <<= 1;
3789 
3790 			if (msb ^ (u16)(data & 1)) {
3791 				crc ^= crc16poly;
3792 				crc |= (u16)0x0001U;
3793 			}
3794 			data >>= 1;
3795 		}
3796 	}
3797 
3798 	return crc;
3799 }
3800 
3801 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3802 {
3803 	u32 buf;
3804 	int mask_index;
3805 	u16 crc;
3806 	u32 temp_wucsr;
3807 	u32 temp_pmt_ctl;
3808 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3809 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3810 	const u8 arp_type[2] = { 0x08, 0x06 };
3811 
3812 	lan78xx_read_reg(dev, MAC_TX, &buf);
3813 	buf &= ~MAC_TX_TXEN_;
3814 	lan78xx_write_reg(dev, MAC_TX, buf);
3815 	lan78xx_read_reg(dev, MAC_RX, &buf);
3816 	buf &= ~MAC_RX_RXEN_;
3817 	lan78xx_write_reg(dev, MAC_RX, buf);
3818 
3819 	lan78xx_write_reg(dev, WUCSR, 0);
3820 	lan78xx_write_reg(dev, WUCSR2, 0);
3821 	lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3822 
3823 	temp_wucsr = 0;
3824 
3825 	temp_pmt_ctl = 0;
3826 	lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3827 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3828 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3829 
3830 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3831 		lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3832 
3833 	mask_index = 0;
3834 	if (wol & WAKE_PHY) {
3835 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3836 
3837 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3838 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3839 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3840 	}
3841 	if (wol & WAKE_MAGIC) {
3842 		temp_wucsr |= WUCSR_MPEN_;
3843 
3844 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3845 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3846 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3847 	}
3848 	if (wol & WAKE_BCAST) {
3849 		temp_wucsr |= WUCSR_BCST_EN_;
3850 
3851 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3852 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3853 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3854 	}
3855 	if (wol & WAKE_MCAST) {
3856 		temp_wucsr |= WUCSR_WAKE_EN_;
3857 
3858 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3859 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3860 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3861 					WUF_CFGX_EN_ |
3862 					WUF_CFGX_TYPE_MCAST_ |
3863 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3864 					(crc & WUF_CFGX_CRC16_MASK_));
3865 
3866 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3867 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3868 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3869 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3870 		mask_index++;
3871 
3872 		/* for IPv6 Multicast */
3873 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3874 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3875 					WUF_CFGX_EN_ |
3876 					WUF_CFGX_TYPE_MCAST_ |
3877 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3878 					(crc & WUF_CFGX_CRC16_MASK_));
3879 
3880 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3881 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3882 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3883 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3884 		mask_index++;
3885 
3886 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3887 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3888 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3889 	}
3890 	if (wol & WAKE_UCAST) {
3891 		temp_wucsr |= WUCSR_PFDA_EN_;
3892 
3893 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3894 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3895 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3896 	}
3897 	if (wol & WAKE_ARP) {
3898 		temp_wucsr |= WUCSR_WAKE_EN_;
3899 
3900 		/* set WUF_CFG & WUF_MASK
3901 		 * for packettype (offset 12,13) = ARP (0x0806)
3902 		 */
3903 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3904 		lan78xx_write_reg(dev, WUF_CFG(mask_index),
3905 					WUF_CFGX_EN_ |
3906 					WUF_CFGX_TYPE_ALL_ |
3907 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3908 					(crc & WUF_CFGX_CRC16_MASK_));
3909 
3910 		lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3911 		lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3912 		lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3913 		lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3914 		mask_index++;
3915 
3916 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3917 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3918 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3919 	}
3920 
3921 	lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3922 
3923 	/* when multiple WOL bits are set */
3924 	if (hweight_long((unsigned long)wol) > 1) {
3925 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3926 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3927 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3928 	}
3929 	lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3930 
3931 	/* clear WUPS */
3932 	lan78xx_read_reg(dev, PMT_CTL, &buf);
3933 	buf |= PMT_CTL_WUPS_MASK_;
3934 	lan78xx_write_reg(dev, PMT_CTL, buf);
3935 
3936 	lan78xx_read_reg(dev, MAC_RX, &buf);
3937 	buf |= MAC_RX_RXEN_;
3938 	lan78xx_write_reg(dev, MAC_RX, buf);
3939 
3940 	return 0;
3941 }
3942 
3943 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3944 {
3945 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3946 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3947 	u32 buf;
3948 	int ret;
3949 
3950 	if (!dev->suspend_count++) {
3951 		spin_lock_irq(&dev->txq.lock);
3952 		/* don't autosuspend while transmitting */
3953 		if ((skb_queue_len(&dev->txq) ||
3954 		     skb_queue_len(&dev->txq_pend)) &&
3955 			PMSG_IS_AUTO(message)) {
3956 			spin_unlock_irq(&dev->txq.lock);
3957 			ret = -EBUSY;
3958 			goto out;
3959 		} else {
3960 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3961 			spin_unlock_irq(&dev->txq.lock);
3962 		}
3963 
3964 		/* stop TX & RX */
3965 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3966 		buf &= ~MAC_TX_TXEN_;
3967 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3968 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3969 		buf &= ~MAC_RX_RXEN_;
3970 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3971 
3972 		/* empty out the rx and queues */
3973 		netif_device_detach(dev->net);
3974 		lan78xx_terminate_urbs(dev);
3975 		usb_kill_urb(dev->urb_intr);
3976 
3977 		/* reattach */
3978 		netif_device_attach(dev->net);
3979 	}
3980 
3981 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3982 		del_timer(&dev->stat_monitor);
3983 
3984 		if (PMSG_IS_AUTO(message)) {
3985 			/* auto suspend (selective suspend) */
3986 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3987 			buf &= ~MAC_TX_TXEN_;
3988 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3989 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3990 			buf &= ~MAC_RX_RXEN_;
3991 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3992 
3993 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3994 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3995 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3996 
3997 			/* set goodframe wakeup */
3998 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3999 
4000 			buf |= WUCSR_RFE_WAKE_EN_;
4001 			buf |= WUCSR_STORE_WAKE_;
4002 
4003 			ret = lan78xx_write_reg(dev, WUCSR, buf);
4004 
4005 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4006 
4007 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4008 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
4009 
4010 			buf |= PMT_CTL_PHY_WAKE_EN_;
4011 			buf |= PMT_CTL_WOL_EN_;
4012 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4013 			buf |= PMT_CTL_SUS_MODE_3_;
4014 
4015 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4016 
4017 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4018 
4019 			buf |= PMT_CTL_WUPS_MASK_;
4020 
4021 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4022 
4023 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4024 			buf |= MAC_RX_RXEN_;
4025 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4026 		} else {
4027 			lan78xx_set_suspend(dev, pdata->wol);
4028 		}
4029 	}
4030 
4031 	ret = 0;
4032 out:
4033 	return ret;
4034 }
4035 
4036 static int lan78xx_resume(struct usb_interface *intf)
4037 {
4038 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4039 	struct sk_buff *skb;
4040 	struct urb *res;
4041 	int ret;
4042 	u32 buf;
4043 
4044 	if (!timer_pending(&dev->stat_monitor)) {
4045 		dev->delta = 1;
4046 		mod_timer(&dev->stat_monitor,
4047 			  jiffies + STAT_UPDATE_TIMER);
4048 	}
4049 
4050 	if (!--dev->suspend_count) {
4051 		/* resume interrupt URBs */
4052 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4053 				usb_submit_urb(dev->urb_intr, GFP_NOIO);
4054 
4055 		spin_lock_irq(&dev->txq.lock);
4056 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4057 			skb = (struct sk_buff *)res->context;
4058 			ret = usb_submit_urb(res, GFP_ATOMIC);
4059 			if (ret < 0) {
4060 				dev_kfree_skb_any(skb);
4061 				usb_free_urb(res);
4062 				usb_autopm_put_interface_async(dev->intf);
4063 			} else {
4064 				netif_trans_update(dev->net);
4065 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4066 			}
4067 		}
4068 
4069 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4070 		spin_unlock_irq(&dev->txq.lock);
4071 
4072 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4073 			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4074 				netif_start_queue(dev->net);
4075 			tasklet_schedule(&dev->bh);
4076 		}
4077 	}
4078 
4079 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4080 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4081 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4082 
4083 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4084 					     WUCSR2_ARP_RCD_ |
4085 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4086 					     WUCSR2_IPV4_TCPSYN_RCD_);
4087 
4088 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4089 					    WUCSR_EEE_RX_WAKE_ |
4090 					    WUCSR_PFDA_FR_ |
4091 					    WUCSR_RFE_WAKE_FR_ |
4092 					    WUCSR_WUFR_ |
4093 					    WUCSR_MPR_ |
4094 					    WUCSR_BCST_FR_);
4095 
4096 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4097 	buf |= MAC_TX_TXEN_;
4098 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4099 
4100 	return 0;
4101 }
4102 
4103 static int lan78xx_reset_resume(struct usb_interface *intf)
4104 {
4105 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4106 
4107 	lan78xx_reset(dev);
4108 
4109 	phy_start(dev->net->phydev);
4110 
4111 	return lan78xx_resume(intf);
4112 }
4113 
4114 static const struct usb_device_id products[] = {
4115 	{
4116 	/* LAN7800 USB Gigabit Ethernet Device */
4117 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4118 	},
4119 	{
4120 	/* LAN7850 USB Gigabit Ethernet Device */
4121 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4122 	},
4123 	{
4124 	/* LAN7801 USB Gigabit Ethernet Device */
4125 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4126 	},
4127 	{},
4128 };
4129 MODULE_DEVICE_TABLE(usb, products);
4130 
4131 static struct usb_driver lan78xx_driver = {
4132 	.name			= DRIVER_NAME,
4133 	.id_table		= products,
4134 	.probe			= lan78xx_probe,
4135 	.disconnect		= lan78xx_disconnect,
4136 	.suspend		= lan78xx_suspend,
4137 	.resume			= lan78xx_resume,
4138 	.reset_resume		= lan78xx_reset_resume,
4139 	.supports_autosuspend	= 1,
4140 	.disable_hub_initiated_lpm = 1,
4141 };
4142 
4143 module_usb_driver(lan78xx_driver);
4144 
4145 MODULE_AUTHOR(DRIVER_AUTHOR);
4146 MODULE_DESCRIPTION(DRIVER_DESC);
4147 MODULE_LICENSE("GPL");
4148