// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))
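/* A worked example (editor's illustration): the thresholds are programmed
 * in 512-byte units, rounded up, so FLOW_CTRL_THRESHOLD(FLOW_ON_HS,
 * FLOW_OFF_HS) with 8704 and 1024 bytes gives
 * (8704 + 511) / 512 = 17 = 0x11 in bits 6:0 for "on" and
 * (1024 + 511) / 512 = 2 = 0x02 in bits 14:8 for "off", i.e. 0x0211.
 */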

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2
/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
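
/* The counters in lan78xx_statstage{,64} below must stay in exactly the
 * order of the strings above: lan78xx_get_stats() copies curr_stat verbatim
 * into the ethtool data array, so the string-to-counter mapping is purely
 * positional.
 */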

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))
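
/* Space for the 32 standard MII registers that lan78xx_get_regs() appends
 * to the device register dump when a PHY is attached.
 */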

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10
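
/* The EVENT_* values above are bit positions in lan78xx_net.flags; most are
 * raised via lan78xx_defer_kevent(), which schedules the delayed work
 * dev->wq to service them.
 */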

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	ret = lan78xx_write_reg(dev, reg, buf);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

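/* The hardware counters are 32-bit and wrap silently. If a fresh reading is
 * smaller than the value saved on the previous poll, the counter must have
 * rolled over in between, so record one extra wrap for that member.
 */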
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

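	/* Rebuild 64-bit totals from the 32-bit hardware counters:
	 * curr_stat = latest reading + wraps * (counter maximum + 1),
	 * where rollover_max holds each counter's maximum value, making
	 * (max + 1) its modulus.
	 */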
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read completes or times out.
 * Must be called with phy_mutex held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
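
/* Usage sketch (editor's illustration): mii_access(phy_id, MII_BMSR, MII_READ)
 * composes a MII_ACC value with the PHY address and register index in their
 * respective fields plus the read and busy bits; writing it to MII_ACC starts
 * the cycle, and MII_ACC_MII_BUSY_ clears once the cycle completes (see
 * lan78xx_phy_wait_not_busy()).
 */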

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;
755 
756 	for (i = 0; i < length; i++) {
757 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
758 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
759 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
760 		if (unlikely(ret < 0)) {
761 			retval = -EIO;
762 			goto exit;
763 		}
764 
765 		retval = lan78xx_wait_eeprom(dev);
766 		if (retval < 0)
767 			goto exit;
768 
769 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
770 		if (unlikely(ret < 0)) {
771 			retval = -EIO;
772 			goto exit;
773 		}
774 
775 		data[i] = val & 0xFF;
776 		offset++;
777 	}
778 
779 	retval = 0;
780 exit:
781 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
782 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
783 
784 	return retval;
785 }
786 
787 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
788 			       u32 length, u8 *data)
789 {
790 	u8 sig;
791 	int ret;
792 
793 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
794 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
795 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
796 	else
797 		ret = -EINVAL;
798 
799 	return ret;
800 }
801 
802 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
803 				    u32 length, u8 *data)
804 {
805 	u32 val;
806 	u32 saved;
807 	int i, ret;
808 	int retval;
809 
	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to clear */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to clear */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}
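
/* The first OTP byte is an image indicator: OTP_INDICATOR_1 selects the
 * image at offset 0, OTP_INDICATOR_2 means the valid image starts 0x100
 * bytes further in, and anything else is rejected as invalid.
 */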

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}
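
/* A worked example (editor's illustration): for address 00:11:22:33:44:55
 * the packing above yields pfilter_table[index][1] = 0x33221100 (MAF_LO) and
 * pfilter_table[index][0] = 0x00005544 | MAF_HI_VALID_ | MAF_HI_TYPE_DST_
 * (MAF_HI), matching the register writes in
 * lan78xx_deferred_multicast_write().
 */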

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds the device's own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return 0;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (wol->wolopts & ~WAKE_ALL) {
		usb_autopm_put_interface(dev->intf);
		return -EINVAL;
	}

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	u32 link;

	mutex_lock(&net->phydev->lock);
	phy_read_status(net->phydev);
	link = net->phydev->link;
	mutex_unlock(&net->phydev->lock);

	return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan through PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* In forced 100 Mbps full/half duplex mode, the chip may fail to set
	 * the mode correctly when the cable is switched between a long
	 * (~50+ m) one and a short one. As a workaround, set the speed to
	 * 10 first, then to 100, while in forced 100 F/H mode.
	 */
1958 	if (!phydev->autoneg && (phydev->speed == 100)) {
1959 		/* disable phy interrupt */
1960 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1961 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1962 		phy_write(phydev, LAN88XX_INT_MASK, temp);
1963 
1964 		temp = phy_read(phydev, MII_BMCR);
1965 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1966 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1967 		temp |= BMCR_SPEED100;
1968 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1969 
1970 		/* clear any interrupt left pending by the workaround */
1971 		temp = phy_read(phydev, LAN88XX_INT_STS);
1972 
1973 		/* re-enable the PHY interrupt */
1974 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1975 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1976 		phy_write(phydev, LAN88XX_INT_MASK, temp);
1977 	}
1978 }
1979 
1980 static int irq_map(struct irq_domain *d, unsigned int irq,
1981 		   irq_hw_number_t hwirq)
1982 {
1983 	struct irq_domain_data *data = d->host_data;
1984 
1985 	irq_set_chip_data(irq, data);
1986 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1987 	irq_set_noprobe(irq);
1988 
1989 	return 0;
1990 }
1991 
1992 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1993 {
1994 	irq_set_chip_and_handler(irq, NULL, NULL);
1995 	irq_set_chip_data(irq, NULL);
1996 }
1997 
1998 static const struct irq_domain_ops chip_domain_ops = {
1999 	.map	= irq_map,
2000 	.unmap	= irq_unmap,
2001 };
2002 
2003 static void lan78xx_irq_mask(struct irq_data *irqd)
2004 {
2005 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2006 
2007 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2008 }
2009 
2010 static void lan78xx_irq_unmask(struct irq_data *irqd)
2011 {
2012 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2013 
2014 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2015 }
2016 
2017 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2018 {
2019 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2020 
2021 	mutex_lock(&data->irq_lock);
2022 }
2023 
2024 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2025 {
2026 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2027 	struct lan78xx_net *dev =
2028 			container_of(data, struct lan78xx_net, domain_data);
2029 	u32 buf;
2030 
2031 	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
2032 	 * are the only two callbacks executed in a non-atomic context.
2033 	 */
2034 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2035 	if (buf != data->irqenable)
2036 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2037 
2038 	mutex_unlock(&data->irq_lock);
2039 }
2040 
2041 static struct irq_chip lan78xx_irqchip = {
2042 	.name			= "lan78xx-irqs",
2043 	.irq_mask		= lan78xx_irq_mask,
2044 	.irq_unmask		= lan78xx_irq_unmask,
2045 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2046 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2047 };
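
/* This follows the usual irq_chip pattern for slow (sleeping) buses:
 * lan78xx_irq_mask()/unmask() only update the cached ->irqenable under
 * the bus lock, and the single USB register write happens in
 * irq_bus_sync_unlock(), which runs in a context that may sleep.
 *
 * Consumer sketch (hypothetical; phylib does the equivalent internally
 * once phydev->irq is set from domain_data.phyirq):
 *
 *	ret = request_threaded_irq(dev->domain_data.phyirq, NULL,
 *				   my_phy_handler, IRQF_ONESHOT,
 *				   "lan78xx-phy", dev);
 *
 * my_phy_handler is an illustrative name, not part of this driver.
 */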
2048 
2049 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2050 {
2051 	struct device_node *of_node;
2052 	struct irq_domain *irqdomain;
2053 	unsigned int irqmap = 0;
2054 	u32 buf;
2055 	int ret = 0;
2056 
2057 	of_node = dev->udev->dev.parent->of_node;
2058 
2059 	mutex_init(&dev->domain_data.irq_lock);
2060 
2061 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2062 	dev->domain_data.irqenable = buf;
2063 
2064 	dev->domain_data.irqchip = &lan78xx_irqchip;
2065 	dev->domain_data.irq_handler = handle_simple_irq;
2066 
2067 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2068 					  &chip_domain_ops, &dev->domain_data);
2069 	if (irqdomain) {
2070 		/* create mapping for PHY interrupt */
2071 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2072 		if (!irqmap) {
2073 			irq_domain_remove(irqdomain);
2074 
2075 			irqdomain = NULL;
2076 			ret = -EINVAL;
2077 		}
2078 	} else {
2079 		ret = -EINVAL;
2080 	}
2081 
2082 	dev->domain_data.irqdomain = irqdomain;
2083 	dev->domain_data.phyirq = irqmap;
2084 
2085 	return ret;
2086 }
2087 
2088 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2089 {
2090 	if (dev->domain_data.phyirq > 0) {
2091 		irq_dispose_mapping(dev->domain_data.phyirq);
2092 
2093 		if (dev->domain_data.irqdomain)
2094 			irq_domain_remove(dev->domain_data.irqdomain);
2095 	}
2096 	dev->domain_data.phyirq = 0;
2097 	dev->domain_data.irqdomain = NULL;
2098 }
2099 
2100 static int lan8835_fixup(struct phy_device *phydev)
2101 {
2102 	int buf;
2103 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2104 
2105 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2106 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2107 	buf &= ~0x1800;
2108 	buf |= 0x0800;
2109 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2110 
2111 	/* RGMII MAC TXC Delay Enable */
2112 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2113 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2114 
2115 	/* RGMII TX DLL Tune Adjust */
2116 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2117 
2118 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2119 
2120 	return 1;
2121 }
2122 
2123 static int ksz9031rnx_fixup(struct phy_device *phydev)
2124 {
2125 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2126 
2127 	/* Micrel KSZ9031RNX PHY configuration */
2128 	/* RGMII Control Signal Pad Skew */
2129 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2130 	/* RGMII RX Data Pad Skew */
2131 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2132 	/* RGMII RX Clock Pad Skew */
2133 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2134 
2135 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2136 
2137 	return 1;
2138 }
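
/* The MMD writes above target the KSZ9031 RGMII pad-skew registers:
 * device address 2, registers 4 (control signals), 5 (RX data) and 8
 * (RX clock). MDIO_MMD_WIS is used only because it happens to equal
 * that device address (2); the values trim the skews for RGMII_RXID
 * operation.
 */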
2139 
2140 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2141 {
2142 	u32 buf;
2143 	int ret;
2144 	struct fixed_phy_status fphy_status = {
2145 		.link = 1,
2146 		.speed = SPEED_1000,
2147 		.duplex = DUPLEX_FULL,
2148 	};
2149 	struct phy_device *phydev;
2150 
2151 	phydev = phy_find_first(dev->mdiobus);
2152 	if (!phydev) {
2153 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2154 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2155 		if (IS_ERR(phydev)) {
2156 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2157 			return NULL;
2158 		}
2159 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2160 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2161 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2162 					MAC_RGMII_ID_TXC_DELAY_EN_);
2163 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2164 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2165 		buf |= HW_CFG_CLK125_EN_;
2166 		buf |= HW_CFG_REFCLK25_EN_;
2167 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2168 	} else {
2169 		if (!phydev->drv) {
2170 			netdev_err(dev->net, "no PHY driver found\n");
2171 			return NULL;
2172 		}
2173 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2174 		/* external PHY fixup for KSZ9031RNX */
2175 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2176 						 ksz9031rnx_fixup);
2177 		if (ret < 0) {
2178 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2179 			return NULL;
2180 		}
2181 		/* external PHY fixup for LAN8835 */
2182 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2183 						 lan8835_fixup);
2184 		if (ret < 0) {
2185 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2186 			return NULL;
2187 		}
2188 		/* add more external PHY fixup here if needed */
2189 
2190 		phydev->is_internal = false;
2191 	}
2192 	return phydev;
2193 }
2194 
2195 static int lan78xx_phy_init(struct lan78xx_net *dev)
2196 {
2197 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2198 	int ret;
2199 	u32 mii_adv;
2200 	struct phy_device *phydev;
2201 
2202 	switch (dev->chipid) {
2203 	case ID_REV_CHIP_ID_7801_:
2204 		phydev = lan7801_phy_init(dev);
2205 		if (!phydev) {
2206 			netdev_err(dev->net, "lan7801: PHY Init Failed\n");
2207 			return -EIO;
2208 		}
2209 		break;
2210 
2211 	case ID_REV_CHIP_ID_7800_:
2212 	case ID_REV_CHIP_ID_7850_:
2213 		phydev = phy_find_first(dev->mdiobus);
2214 		if (!phydev) {
2215 			netdev_err(dev->net, "no PHY found\n");
2216 			return -EIO;
2217 		}
2218 		phydev->is_internal = true;
2219 		dev->interface = PHY_INTERFACE_MODE_GMII;
2220 		break;
2221 
2222 	default:
2223 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2224 		return -EIO;
2225 	}
2226 
2227 	/* if phyirq is not set, use polling mode in phylib */
2228 	if (dev->domain_data.phyirq > 0)
2229 		phydev->irq = dev->domain_data.phyirq;
2230 	else
2231 		phydev->irq = 0;
2232 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2233 
2234 	/* set to AUTOMDIX */
2235 	phydev->mdix = ETH_TP_MDI_AUTO;
2236 
2237 	ret = phy_connect_direct(dev->net, phydev,
2238 				 lan78xx_link_status_change,
2239 				 dev->interface);
2240 	if (ret) {
2241 		netdev_err(dev->net, "can't attach PHY to %s\n",
2242 			   dev->mdiobus->id);
2243 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2244 			if (phy_is_pseudo_fixed_link(phydev)) {
2245 				fixed_phy_unregister(phydev);
2246 			} else {
2247 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2248 							     0xfffffff0);
2249 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2250 							     0xfffffff0);
2251 			}
2252 		}
2253 		return -EIO;
2254 	}
2255 
2256 	/* MAC doesn't support 1000T Half */
2257 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2258 
2259 	/* support both flow controls */
2260 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2261 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2262 			   phydev->advertising);
2263 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2264 			   phydev->advertising);
2265 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2266 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2267 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2268 
2269 	if (phydev->mdio.dev.of_node) {
2270 		u32 reg;
2271 		int len;
2272 
2273 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2274 						      "microchip,led-modes",
2275 						      sizeof(u32));
2276 		if (len >= 0) {
2277 			/* Ensure the appropriate LEDs are enabled */
2278 			lan78xx_read_reg(dev, HW_CFG, &reg);
2279 			reg &= ~(HW_CFG_LED0_EN_ |
2280 				 HW_CFG_LED1_EN_ |
2281 				 HW_CFG_LED2_EN_ |
2282 				 HW_CFG_LED3_EN_);
2283 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2284 				(len > 1) * HW_CFG_LED1_EN_ |
2285 				(len > 2) * HW_CFG_LED2_EN_ |
2286 				(len > 3) * HW_CFG_LED3_EN_;
2287 			lan78xx_write_reg(dev, HW_CFG, reg);
2288 		}
2289 	}
2290 
2291 	genphy_config_aneg(phydev);
2292 
2293 	dev->fc_autoneg = phydev->autoneg;
2294 
2295 	return 0;
2296 }
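
/* Device-tree sketch for the LED property handled above (assumed to
 * follow the microchip,lan78xx binding; the cell values are
 * illustrative):
 *
 *	mdio {
 *		ethernet-phy@1 {
 *			reg = <1>;
 *			microchip,led-modes = <1 6>;
 *		};
 *	};
 *
 * With two cells only HW_CFG_LED0_EN_ and HW_CFG_LED1_EN_ are set;
 * HW_CFG_LED2_EN_ and HW_CFG_LED3_EN_ stay cleared.
 */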
2297 
2298 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2299 {
2300 	u32 buf;
2301 	bool rxenabled;
2302 
2303 	lan78xx_read_reg(dev, MAC_RX, &buf);
2304 
2305 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2306 
2307 	if (rxenabled) {
2308 		buf &= ~MAC_RX_RXEN_;
2309 		lan78xx_write_reg(dev, MAC_RX, buf);
2310 	}
2311 
2312 	/* add 4 to size for FCS */
2313 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2314 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2315 
2316 	lan78xx_write_reg(dev, MAC_RX, buf);
2317 
2318 	if (rxenabled) {
2319 		buf |= MAC_RX_RXEN_;
2320 		lan78xx_write_reg(dev, MAC_RX, buf);
2321 	}
2322 
2323 	return 0;
2324 }
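
/* Worked example: with the default 1500-byte MTU the callers pass
 * size = 1500 + VLAN_ETH_HLEN (18) = 1518, so the register is
 * programmed with 1518 + 4 = 1522 bytes, i.e. a full VLAN-tagged
 * Ethernet frame including the FCS.
 */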
2325 
2326 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2327 {
2328 	struct sk_buff *skb;
2329 	unsigned long flags;
2330 	int count = 0;
2331 
2332 	spin_lock_irqsave(&q->lock, flags);
2333 	while (!skb_queue_empty(q)) {
2334 		struct skb_data	*entry;
2335 		struct urb *urb;
2336 		int ret;
2337 
2338 		skb_queue_walk(q, skb) {
2339 			entry = (struct skb_data *)skb->cb;
2340 			if (entry->state != unlink_start)
2341 				goto found;
2342 		}
2343 		break;
2344 found:
2345 		entry->state = unlink_start;
2346 		urb = entry->urb;
2347 
2348 		/* Take a reference on the URB so it cannot be freed
2349 		 * while usb_unlink_urb() runs; usb_unlink_urb() always
2350 		 * races with the completion handler (including
2351 		 * defer_bh), and without the extra reference that race
2352 		 * could trigger a use-after-free inside usb_unlink_urb.
2353 		 */
2354 		usb_get_urb(urb);
2355 		spin_unlock_irqrestore(&q->lock, flags);
2356 		/* during some PM-driven resume scenarios,
2357 		 * these (async) unlinks complete immediately
2358 		 */
2359 		ret = usb_unlink_urb(urb);
2360 		if (ret != -EINPROGRESS && ret != 0)
2361 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2362 		else
2363 			count++;
2364 		usb_put_urb(urb);
2365 		spin_lock_irqsave(&q->lock, flags);
2366 	}
2367 	spin_unlock_irqrestore(&q->lock, flags);
2368 	return count;
2369 }
2370 
2371 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2372 {
2373 	struct lan78xx_net *dev = netdev_priv(netdev);
2374 	int ll_mtu = new_mtu + netdev->hard_header_len;
2375 	int old_hard_mtu = dev->hard_mtu;
2376 	int old_rx_urb_size = dev->rx_urb_size;
2377 	int ret;
2378 
2379 	/* no second zero-length packet read wanted after mtu-sized packets */
2380 	if ((ll_mtu % dev->maxpacket) == 0)
2381 		return -EDOM;
2382 
2383 	ret = usb_autopm_get_interface(dev->intf);
2384 	if (ret < 0)
2385 		return ret;
2386 
2387 	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2388 
2389 	netdev->mtu = new_mtu;
2390 
2391 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2392 	if (dev->rx_urb_size == old_hard_mtu) {
2393 		dev->rx_urb_size = dev->hard_mtu;
2394 		if (dev->rx_urb_size > old_rx_urb_size) {
2395 			if (netif_running(dev->net)) {
2396 				unlink_urbs(dev, &dev->rxq);
2397 				tasklet_schedule(&dev->bh);
2398 			}
2399 		}
2400 	}
2401 
2402 	usb_autopm_put_interface(dev->intf);
2403 
2404 	return 0;
2405 }
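
/* Example of the -EDOM check above (assumed high-speed numbers):
 * maxpacket is 512 and hard_header_len is ETH_HLEN (14) +
 * TX_OVERHEAD (8) = 22 after lan78xx_bind(), so new_mtu = 1002 gives
 * ll_mtu = 1024 = 2 * 512 and is rejected: a transfer ending exactly
 * on a USB packet boundary would need a zero-length packet to
 * terminate it.
 */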
2406 
2407 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2408 {
2409 	struct lan78xx_net *dev = netdev_priv(netdev);
2410 	struct sockaddr *addr = p;
2411 	u32 addr_lo, addr_hi;
2412 
2413 	if (netif_running(netdev))
2414 		return -EBUSY;
2415 
2416 	if (!is_valid_ether_addr(addr->sa_data))
2417 		return -EADDRNOTAVAIL;
2418 
2419 	eth_hw_addr_set(netdev, addr->sa_data);
2420 
2421 	addr_lo = netdev->dev_addr[0] |
2422 		  netdev->dev_addr[1] << 8 |
2423 		  netdev->dev_addr[2] << 16 |
2424 		  netdev->dev_addr[3] << 24;
2425 	addr_hi = netdev->dev_addr[4] |
2426 		  netdev->dev_addr[5] << 8;
2427 
2428 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2429 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2430 
2431 	/* Also update perfect-filter slot 0 so the new address is accepted */
2432 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2433 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2434 
2435 	return 0;
2436 }
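
/* Packing example (illustrative address 02:11:22:33:44:55):
 * RX_ADDRL = 0x33221102 (bytes 0-3) and RX_ADDRH = 0x00005544
 * (bytes 4-5); MAF_LO(0)/MAF_HI(0) receive the same values with
 * MAF_HI_VALID_ set so perfect filtering matches the new address.
 */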
2437 
2438 /* Enable or disable Rx checksum offload engine */
2439 static int lan78xx_set_features(struct net_device *netdev,
2440 				netdev_features_t features)
2441 {
2442 	struct lan78xx_net *dev = netdev_priv(netdev);
2443 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2444 	unsigned long flags;
2445 
2446 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2447 
2448 	if (features & NETIF_F_RXCSUM) {
2449 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2450 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2451 	} else {
2452 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2453 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2454 	}
2455 
2456 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2457 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2458 	else
2459 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2460 
2461 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2462 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2463 	else
2464 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2465 
2466 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2467 
2468 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2469 
2470 	return 0;
2471 }
2472 
2473 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2474 {
2475 	struct lan78xx_priv *pdata =
2476 			container_of(param, struct lan78xx_priv, set_vlan);
2477 	struct lan78xx_net *dev = pdata->dev;
2478 
2479 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2480 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2481 }
2482 
2483 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2484 				   __be16 proto, u16 vid)
2485 {
2486 	struct lan78xx_net *dev = netdev_priv(netdev);
2487 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2488 	u16 vid_bit_index;
2489 	u16 vid_dword_index;
2490 
2491 	vid_dword_index = (vid >> 5) & 0x7F;
2492 	vid_bit_index = vid & 0x1F;
2493 
2494 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2495 
2496 	/* defer register writes to a sleepable context */
2497 	schedule_work(&pdata->set_vlan);
2498 
2499 	return 0;
2500 }
2501 
2502 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2503 				    __be16 proto, u16 vid)
2504 {
2505 	struct lan78xx_net *dev = netdev_priv(netdev);
2506 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2507 	u16 vid_bit_index;
2508 	u16 vid_dword_index;
2509 
2510 	vid_dword_index = (vid >> 5) & 0x7F;
2511 	vid_bit_index = vid & 0x1F;
2512 
2513 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2514 
2515 	/* defer register writes to a sleepable context */
2516 	schedule_work(&pdata->set_vlan);
2517 
2518 	return 0;
2519 }
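
/* Bitmap math used by add_vid/kill_vid above: the 4096 possible VIDs
 * map onto 128 32-bit words, e.g. VID 100 lands in word
 * 100 >> 5 = 3, bit 100 & 0x1F = 4. The deferred worker then writes
 * the whole table to the VLAN dataport in one sleepable operation.
 */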
2520 
2521 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2522 {
2523 	int ret;
2524 	u32 buf;
2525 	u32 regs[6] = { 0 };
2526 
2527 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2528 	if (ret == 0 && (buf & USB_CFG1_LTM_ENABLE_)) {
2529 		u8 temp[2];
2530 		/* Get values from EEPROM first */
2531 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2532 			if (temp[0] == 24) {
2533 				ret = lan78xx_read_raw_eeprom(dev,
2534 							      temp[1] * 2,
2535 							      24,
2536 							      (u8 *)regs);
2537 				if (ret < 0)
2538 					return;
2539 			}
2540 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2541 			if (temp[0] == 24) {
2542 				ret = lan78xx_read_raw_otp(dev,
2543 							   temp[1] * 2,
2544 							   24,
2545 							   (u8 *)regs);
2546 				if (ret < 0)
2547 					return;
2548 			}
2549 		}
2550 	}
2551 
2552 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2553 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2554 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2555 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2556 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2557 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2558 }
2559 
2560 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2561 {
2562 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2563 }
2564 
2565 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2566 			   u32 hw_disabled)
2567 {
2568 	unsigned long timeout;
2569 	bool stopped = true;
2570 	int ret;
2571 	u32 buf;
2572 
2573 	/* Stop the h/w block (if not already stopped) */
2574 
2575 	ret = lan78xx_read_reg(dev, reg, &buf);
2576 	if (ret < 0)
2577 		return ret;
2578 
2579 	if (buf & hw_enabled) {
2580 		buf &= ~hw_enabled;
2581 
2582 		ret = lan78xx_write_reg(dev, reg, buf);
2583 		if (ret < 0)
2584 			return ret;
2585 
2586 		stopped = false;
2587 		timeout = jiffies + HW_DISABLE_TIMEOUT;
2588 		do  {
2589 			ret = lan78xx_read_reg(dev, reg, &buf);
2590 			if (ret < 0)
2591 				return ret;
2592 
2593 			if (buf & hw_disabled)
2594 				stopped = true;
2595 			else
2596 				msleep(HW_DISABLE_DELAY_MS);
2597 		} while (!stopped && !time_after(jiffies, timeout));
2598 	}
2599 
2600 	ret = stopped ? 0 : -ETIME;
2601 
2602 	return ret;
2603 }
2604 
2605 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2606 {
2607 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2608 }
2609 
2610 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2611 {
2612 	int ret;
2613 
2614 	netif_dbg(dev, drv, dev->net, "start tx path");
2615 
2616 	/* Start the MAC transmitter */
2617 
2618 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2619 	if (ret < 0)
2620 		return ret;
2621 
2622 	/* Start the Tx FIFO */
2623 
2624 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2625 	if (ret < 0)
2626 		return ret;
2627 
2628 	return 0;
2629 }
2630 
2631 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2632 {
2633 	int ret;
2634 
2635 	netif_dbg(dev, drv, dev->net, "stop tx path");
2636 
2637 	/* Stop the Tx FIFO */
2638 
2639 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2640 	if (ret < 0)
2641 		return ret;
2642 
2643 	/* Stop the MAC transmitter */
2644 
2645 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2646 	if (ret < 0)
2647 		return ret;
2648 
2649 	return 0;
2650 }
2651 
2652 /* The caller must ensure the Tx path is stopped before calling
2653  * lan78xx_flush_tx_fifo().
2654  */
2655 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2656 {
2657 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2658 }
2659 
2660 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2661 {
2662 	int ret;
2663 
2664 	netif_dbg(dev, drv, dev->net, "start rx path");
2665 
2666 	/* Start the Rx FIFO */
2667 
2668 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2669 	if (ret < 0)
2670 		return ret;
2671 
2672 	/* Start the MAC receiver */
2673 
2674 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2675 	if (ret < 0)
2676 		return ret;
2677 
2678 	return 0;
2679 }
2680 
2681 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2682 {
2683 	int ret;
2684 
2685 	netif_dbg(dev, drv, dev->net, "stop rx path");
2686 
2687 	/* Stop the MAC receiver */
2688 
2689 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2690 	if (ret < 0)
2691 		return ret;
2692 
2693 	/* Stop the Rx FIFO */
2694 
2695 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2696 	if (ret < 0)
2697 		return ret;
2698 
2699 	return 0;
2700 }
2701 
2702 /* The caller must ensure the Rx path is stopped before calling
2703  * lan78xx_flush_rx_fifo().
2704  */
2705 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2706 {
2707 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2708 }
2709 
2710 static int lan78xx_reset(struct lan78xx_net *dev)
2711 {
2712 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2713 	unsigned long timeout;
2714 	int ret;
2715 	u32 buf;
2716 	u8 sig;
2717 
2718 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2719 	if (ret < 0)
2720 		return ret;
2721 
2722 	buf |= HW_CFG_LRST_;
2723 
2724 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2725 	if (ret < 0)
2726 		return ret;
2727 
2728 	timeout = jiffies + HZ;
2729 	do {
2730 		mdelay(1);
2731 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2732 		if (ret < 0)
2733 			return ret;
2734 
2735 		if (time_after(jiffies, timeout)) {
2736 			netdev_warn(dev->net,
2737 				    "timeout on completion of LiteReset\n");
2738 			ret = -ETIMEDOUT;
2739 			return ret;
2740 		}
2741 	} while (buf & HW_CFG_LRST_);
2742 
2743 	lan78xx_init_mac_address(dev);
2744 
2745 	/* save DEVID for later use */
2746 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2747 	if (ret < 0)
2748 		return ret;
2749 
2750 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2751 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2752 
2753 	/* Respond to the IN token with a NAK */
2754 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2755 	if (ret < 0)
2756 		return ret;
2757 
2758 	buf |= USB_CFG_BIR_;
2759 
2760 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2761 	if (ret < 0)
2762 		return ret;
2763 
2764 	/* Init LTM */
2765 	lan78xx_init_ltm(dev);
2766 
2767 	if (dev->udev->speed == USB_SPEED_SUPER) {
2768 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2769 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2770 		dev->rx_qlen = 4;
2771 		dev->tx_qlen = 4;
2772 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2773 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2774 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2775 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2776 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2777 	} else {
2778 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2779 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2780 		dev->rx_qlen = 4;
2781 		dev->tx_qlen = 4;
2782 	}
2783 
2784 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2785 	if (ret < 0)
2786 		return ret;
2787 
2788 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2789 	if (ret < 0)
2790 		return ret;
2791 
2792 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2793 	if (ret < 0)
2794 		return ret;
2795 
2796 	buf |= HW_CFG_MEF_;
2797 
2798 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2799 	if (ret < 0)
2800 		return ret;
2801 
2802 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2803 	if (ret < 0)
2804 		return ret;
2805 
2806 	buf |= USB_CFG_BCE_;
2807 
2808 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2809 	if (ret < 0)
2810 		return ret;
2811 
2812 	/* set FIFO sizes */
2813 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2814 
2815 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2816 	if (ret < 0)
2817 		return ret;
2818 
2819 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2820 
2821 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2822 	if (ret < 0)
2823 		return ret;
2824 
2825 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2826 	if (ret < 0)
2827 		return ret;
2828 
2829 	ret = lan78xx_write_reg(dev, FLOW, 0);
2830 	if (ret < 0)
2831 		return ret;
2832 
2833 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2834 	if (ret < 0)
2835 		return ret;
2836 
2837 	/* Don't need rfe_ctl_lock during initialisation */
2838 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2839 	if (ret < 0)
2840 		return ret;
2841 
2842 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2843 
2844 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2845 	if (ret < 0)
2846 		return ret;
2847 
2848 	/* Enable or disable checksum offload engines */
2849 	ret = lan78xx_set_features(dev->net, dev->net->features);
2850 	if (ret < 0)
2851 		return ret;
2852 
2853 	lan78xx_set_multicast(dev->net);
2854 
2855 	/* reset PHY */
2856 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2857 	if (ret < 0)
2858 		return ret;
2859 
2860 	buf |= PMT_CTL_PHY_RST_;
2861 
2862 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2863 	if (ret < 0)
2864 		return ret;
2865 
2866 	timeout = jiffies + HZ;
2867 	do {
2868 		mdelay(1);
2869 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2870 		if (ret < 0)
2871 			return ret;
2872 
2873 		if (time_after(jiffies, timeout)) {
2874 			netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
2875 			ret = -ETIMEDOUT;
2876 			return ret;
2877 		}
2878 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2879 
2880 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2881 	if (ret < 0)
2882 		return ret;
2883 
2884 	/* LAN7801 only has RGMII mode */
2885 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2886 		buf &= ~MAC_CR_GMII_EN_;
2887 
2888 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2889 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2890 		if (!ret && sig != EEPROM_INDICATOR) {
2891 			/* No external EEPROM present; use automatic MAC speed/duplex */
2892 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2893 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2894 		}
2895 	}
2896 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2897 	if (ret < 0)
2898 		return ret;
2899 
2900 	ret = lan78xx_set_rx_max_frame_length(dev,
2901 					      dev->net->mtu + VLAN_ETH_HLEN);
2902 
2903 	return ret;
2904 }
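
/* BURST_CAP arithmetic from above: DEFAULT_BURST_CAP_SIZE is 12288
 * bytes, i.e. 12288/1024 = 12 SuperSpeed packets, 12288/512 = 24
 * high-speed packets or 12288/64 = 192 full-speed packets per burst.
 * On high speed, rx_qlen = RX_MAX_QUEUE_MEMORY / 12288 =
 * 91080 / 12288 = 7 in-flight RX URBs.
 */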
2905 
2906 static void lan78xx_init_stats(struct lan78xx_net *dev)
2907 {
2908 	u32 *p;
2909 	int i;
2910 
2911 	/* initialize rollover limits for stats updates;
2912 	 * some hardware counters are 20 bits wide, others 32 bits
2913 	 */
2914 	p = (u32 *)&dev->stats.rollover_max;
2915 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2916 		p[i] = 0xFFFFF;
2917 
2918 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2919 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2920 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2921 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2922 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2923 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2924 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2925 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2926 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2927 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2928 
2929 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2930 }
2931 
2932 static int lan78xx_open(struct net_device *net)
2933 {
2934 	struct lan78xx_net *dev = netdev_priv(net);
2935 	int ret;
2936 
2937 	netif_dbg(dev, ifup, dev->net, "open device");
2938 
2939 	ret = usb_autopm_get_interface(dev->intf);
2940 	if (ret < 0)
2941 		return ret;
2942 
2943 	mutex_lock(&dev->dev_mutex);
2944 
2945 	phy_start(net->phydev);
2946 
2947 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2948 
2949 	/* for Link Check */
2950 	if (dev->urb_intr) {
2951 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2952 		if (ret < 0) {
2953 			netif_err(dev, ifup, dev->net,
2954 				  "intr submit %d\n", ret);
2955 			goto done;
2956 		}
2957 	}
2958 
2959 	ret = lan78xx_flush_rx_fifo(dev);
2960 	if (ret < 0)
2961 		goto done;
2962 	ret = lan78xx_flush_tx_fifo(dev);
2963 	if (ret < 0)
2964 		goto done;
2965 
2966 	ret = lan78xx_start_tx_path(dev);
2967 	if (ret < 0)
2968 		goto done;
2969 	ret = lan78xx_start_rx_path(dev);
2970 	if (ret < 0)
2971 		goto done;
2972 
2973 	lan78xx_init_stats(dev);
2974 
2975 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2976 
2977 	netif_start_queue(net);
2978 
2979 	dev->link_on = false;
2980 
2981 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2982 done:
2983 	mutex_unlock(&dev->dev_mutex);
2984 
2985 	usb_autopm_put_interface(dev->intf);
2986 
2987 	return ret;
2988 }
2989 
2990 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2991 {
2992 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2993 	DECLARE_WAITQUEUE(wait, current);
2994 	int temp;
2995 
2996 	/* ensure there are no more active urbs */
2997 	add_wait_queue(&unlink_wakeup, &wait);
2998 	set_current_state(TASK_UNINTERRUPTIBLE);
2999 	dev->wait = &unlink_wakeup;
3000 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3001 
3002 	/* maybe wait for deletions to finish. */
3003 	while (!skb_queue_empty(&dev->rxq) ||
3004 	       !skb_queue_empty(&dev->txq)) {
3005 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3006 		set_current_state(TASK_UNINTERRUPTIBLE);
3007 		netif_dbg(dev, ifdown, dev->net,
3008 			  "waited for %d urb completions", temp);
3009 	}
3010 	set_current_state(TASK_RUNNING);
3011 	dev->wait = NULL;
3012 	remove_wait_queue(&unlink_wakeup, &wait);
3013 
3014 	while (!skb_queue_empty(&dev->done)) {
3015 		struct skb_data *entry;
3016 		struct sk_buff *skb;
3017 
3018 		skb = skb_dequeue(&dev->done);
3019 		entry = (struct skb_data *)(skb->cb);
3020 		usb_free_urb(entry->urb);
3021 		dev_kfree_skb(skb);
3022 	}
3023 }
3024 
3025 static int lan78xx_stop(struct net_device *net)
3026 {
3027 	struct lan78xx_net *dev = netdev_priv(net);
3028 
3029 	netif_dbg(dev, ifup, dev->net, "stop device");
3030 
3031 	mutex_lock(&dev->dev_mutex);
3032 
3033 	if (timer_pending(&dev->stat_monitor))
3034 		del_timer_sync(&dev->stat_monitor);
3035 
3036 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3037 	netif_stop_queue(net);
3038 	tasklet_kill(&dev->bh);
3039 
3040 	lan78xx_terminate_urbs(dev);
3041 
3042 	netif_info(dev, ifdown, dev->net,
3043 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3044 		   net->stats.rx_packets, net->stats.tx_packets,
3045 		   net->stats.rx_errors, net->stats.tx_errors);
3046 
3047 	/* ignore errors that occur stopping the Tx and Rx data paths */
3048 	lan78xx_stop_tx_path(dev);
3049 	lan78xx_stop_rx_path(dev);
3050 
3051 	if (net->phydev)
3052 		phy_stop(net->phydev);
3053 
3054 	usb_kill_urb(dev->urb_intr);
3055 
3056 	/* Deferred work (tasklet, timer, workqueue) must also stop.
3057 	 * We can't flush_scheduled_work() until we drop rtnl (later),
3058 	 * else the workers could deadlock; so make the workers a no-op.
3059 	 */
3060 	clear_bit(EVENT_TX_HALT, &dev->flags);
3061 	clear_bit(EVENT_RX_HALT, &dev->flags);
3062 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3063 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3064 
3065 	cancel_delayed_work_sync(&dev->wq);
3066 
3067 	usb_autopm_put_interface(dev->intf);
3068 
3069 	mutex_unlock(&dev->dev_mutex);
3070 
3071 	return 0;
3072 }
3073 
3074 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
3075 				       struct sk_buff *skb, gfp_t flags)
3076 {
3077 	u32 tx_cmd_a, tx_cmd_b;
3078 	void *ptr;
3079 
3080 	if (skb_cow_head(skb, TX_OVERHEAD)) {
3081 		dev_kfree_skb_any(skb);
3082 		return NULL;
3083 	}
3084 
3085 	if (skb_linearize(skb)) {
3086 		dev_kfree_skb_any(skb);
3087 		return NULL;
3088 	}
3089 
3090 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3091 
3092 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3093 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3094 
3095 	tx_cmd_b = 0;
3096 	if (skb_is_gso(skb)) {
3097 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3098 
3099 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3100 
3101 		tx_cmd_a |= TX_CMD_A_LSO_;
3102 	}
3103 
3104 	if (skb_vlan_tag_present(skb)) {
3105 		tx_cmd_a |= TX_CMD_A_IVTG_;
3106 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3107 	}
3108 
3109 	ptr = skb_push(skb, 8);
3110 	put_unaligned_le32(tx_cmd_a, ptr);
3111 	put_unaligned_le32(tx_cmd_b, ptr + 4);
3112 
3113 	return skb;
3114 }
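
/* TX metadata sketch: lan78xx_tx_prep() prepends 8 bytes to every
 * frame. For an untagged, non-GSO 1514-byte frame with checksum
 * offload requested, the two words would be (illustrative values)
 *
 *	tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
 *	tx_cmd_b = 0;
 *
 * stored little-endian ahead of the Ethernet header; this is the
 * 8-byte TX_OVERHEAD added to hard_header_len in lan78xx_bind().
 */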
3115 
3116 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3117 			       struct sk_buff_head *list, enum skb_state state)
3118 {
3119 	unsigned long flags;
3120 	enum skb_state old_state;
3121 	struct skb_data *entry = (struct skb_data *)skb->cb;
3122 
3123 	spin_lock_irqsave(&list->lock, flags);
3124 	old_state = entry->state;
3125 	entry->state = state;
3126 
3127 	__skb_unlink(skb, list);
3128 	spin_unlock(&list->lock);
3129 	spin_lock(&dev->done.lock);
3130 
3131 	__skb_queue_tail(&dev->done, skb);
3132 	if (skb_queue_len(&dev->done) == 1)
3133 		tasklet_schedule(&dev->bh);
3134 	spin_unlock_irqrestore(&dev->done.lock, flags);
3135 
3136 	return old_state;
3137 }
3138 
3139 static void tx_complete(struct urb *urb)
3140 {
3141 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3142 	struct skb_data *entry = (struct skb_data *)skb->cb;
3143 	struct lan78xx_net *dev = entry->dev;
3144 
3145 	if (urb->status == 0) {
3146 		dev->net->stats.tx_packets += entry->num_of_packet;
3147 		dev->net->stats.tx_bytes += entry->length;
3148 	} else {
3149 		dev->net->stats.tx_errors++;
3150 
3151 		switch (urb->status) {
3152 		case -EPIPE:
3153 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3154 			break;
3155 
3156 		/* software-driven interface shutdown */
3157 		case -ECONNRESET:
3158 		case -ESHUTDOWN:
3159 			netif_dbg(dev, tx_err, dev->net,
3160 				  "tx err interface gone %d\n",
3161 				  entry->urb->status);
3162 			break;
3163 
3164 		case -EPROTO:
3165 		case -ETIME:
3166 		case -EILSEQ:
3167 			netif_stop_queue(dev->net);
3168 			netif_dbg(dev, tx_err, dev->net,
3169 				  "tx err queue stopped %d\n",
3170 				  entry->urb->status);
3171 			break;
3172 		default:
3173 			netif_dbg(dev, tx_err, dev->net,
3174 				  "unknown tx err %d\n",
3175 				  entry->urb->status);
3176 			break;
3177 		}
3178 	}
3179 
3180 	usb_autopm_put_interface_async(dev->intf);
3181 
3182 	defer_bh(dev, skb, &dev->txq, tx_done);
3183 }
3184 
3185 static void lan78xx_queue_skb(struct sk_buff_head *list,
3186 			      struct sk_buff *newsk, enum skb_state state)
3187 {
3188 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3189 
3190 	__skb_queue_tail(list, newsk);
3191 	entry->state = state;
3192 }
3193 
3194 static netdev_tx_t
3195 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3196 {
3197 	struct lan78xx_net *dev = netdev_priv(net);
3198 	struct sk_buff *skb2 = NULL;
3199 
3200 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3201 		schedule_delayed_work(&dev->wq, 0);
3202 
3203 	if (skb) {
3204 		skb_tx_timestamp(skb);
3205 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3206 	}
3207 
3208 	if (skb2) {
3209 		skb_queue_tail(&dev->txq_pend, skb2);
3210 
3211 		/* throttle the TX path at slower than SuperSpeed USB */
3212 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
3213 		    (skb_queue_len(&dev->txq_pend) > 10))
3214 			netif_stop_queue(net);
3215 	} else {
3216 		netif_dbg(dev, tx_err, dev->net,
3217 			  "lan78xx_tx_prep return NULL\n");
3218 		dev->net->stats.tx_errors++;
3219 		dev->net->stats.tx_dropped++;
3220 	}
3221 
3222 	tasklet_schedule(&dev->bh);
3223 
3224 	return NETDEV_TX_OK;
3225 }
3226 
3227 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3228 {
3229 	struct lan78xx_priv *pdata = NULL;
3230 	int ret;
3231 	int i;
3232 
3233 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3234 
3235 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3236 	if (!pdata) {
3237 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3238 		return -ENOMEM;
3239 	}
3240 
3241 	pdata->dev = dev;
3242 
3243 	spin_lock_init(&pdata->rfe_ctl_lock);
3244 	mutex_init(&pdata->dataport_mutex);
3245 
3246 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3247 
3248 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3249 		pdata->vlan_table[i] = 0;
3250 
3251 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3252 
3253 	dev->net->features = 0;
3254 
3255 	if (DEFAULT_TX_CSUM_ENABLE)
3256 		dev->net->features |= NETIF_F_HW_CSUM;
3257 
3258 	if (DEFAULT_RX_CSUM_ENABLE)
3259 		dev->net->features |= NETIF_F_RXCSUM;
3260 
3261 	if (DEFAULT_TSO_CSUM_ENABLE)
3262 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3263 
3264 	if (DEFAULT_VLAN_RX_OFFLOAD)
3265 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3266 
3267 	if (DEFAULT_VLAN_FILTER_ENABLE)
3268 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3269 
3270 	dev->net->hw_features = dev->net->features;
3271 
3272 	ret = lan78xx_setup_irq_domain(dev);
3273 	if (ret < 0) {
3274 		netdev_warn(dev->net,
3275 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3276 		goto out1;
3277 	}
3278 
3279 	dev->net->hard_header_len += TX_OVERHEAD;
3280 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3281 
3282 	/* Init all registers */
3283 	ret = lan78xx_reset(dev);
3284 	if (ret) {
3285 		netdev_warn(dev->net, "Registers INIT FAILED....");
3286 		goto out2;
3287 	}
3288 
3289 	ret = lan78xx_mdio_init(dev);
3290 	if (ret) {
3291 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3292 		goto out2;
3293 	}
3294 
3295 	dev->net->flags |= IFF_MULTICAST;
3296 
3297 	pdata->wol = WAKE_MAGIC;
3298 
3299 	return ret;
3300 
3301 out2:
3302 	lan78xx_remove_irq_domain(dev);
3303 
3304 out1:
3305 	netdev_warn(dev->net, "Bind routine FAILED");
3306 	cancel_work_sync(&pdata->set_multicast);
3307 	cancel_work_sync(&pdata->set_vlan);
3308 	kfree(pdata);
3309 	return ret;
3310 }
3311 
3312 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3313 {
3314 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3315 
3316 	lan78xx_remove_irq_domain(dev);
3317 
3318 	lan78xx_remove_mdio(dev);
3319 
3320 	if (pdata) {
3321 		cancel_work_sync(&pdata->set_multicast);
3322 		cancel_work_sync(&pdata->set_vlan);
3323 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3324 		kfree(pdata);
3325 		pdata = NULL;
3326 		dev->data[0] = 0;
3327 	}
3328 }
3329 
3330 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3331 				    struct sk_buff *skb,
3332 				    u32 rx_cmd_a, u32 rx_cmd_b)
3333 {
3334 	/* HW Checksum offload appears to be flawed if used when not stripping
3335 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3336 	 */
3337 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3338 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3339 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3340 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3341 		skb->ip_summed = CHECKSUM_NONE;
3342 	} else {
3343 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3344 		skb->ip_summed = CHECKSUM_COMPLETE;
3345 	}
3346 }
3347 
3348 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3349 				    struct sk_buff *skb,
3350 				    u32 rx_cmd_a, u32 rx_cmd_b)
3351 {
3352 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3353 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3354 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3355 				       (rx_cmd_b & 0xffff));
3356 }
3357 
3358 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3359 {
3360 	int status;
3361 
3362 	dev->net->stats.rx_packets++;
3363 	dev->net->stats.rx_bytes += skb->len;
3364 
3365 	skb->protocol = eth_type_trans(skb, dev->net);
3366 
3367 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3368 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3369 	memset(skb->cb, 0, sizeof(struct skb_data));
3370 
3371 	if (skb_defer_rx_timestamp(skb))
3372 		return;
3373 
3374 	status = netif_rx(skb);
3375 	if (status != NET_RX_SUCCESS)
3376 		netif_dbg(dev, rx_err, dev->net,
3377 			  "netif_rx status %d\n", status);
3378 }
3379 
3380 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3381 {
3382 	if (skb->len < dev->net->hard_header_len)
3383 		return 0;
3384 
3385 	while (skb->len > 0) {
3386 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3387 		u16 rx_cmd_c;
3388 		struct sk_buff *skb2;
3389 		unsigned char *packet;
3390 
3391 		rx_cmd_a = get_unaligned_le32(skb->data);
3392 		skb_pull(skb, sizeof(rx_cmd_a));
3393 
3394 		rx_cmd_b = get_unaligned_le32(skb->data);
3395 		skb_pull(skb, sizeof(rx_cmd_b));
3396 
3397 		rx_cmd_c = get_unaligned_le16(skb->data);
3398 		skb_pull(skb, sizeof(rx_cmd_c));
3399 
3400 		packet = skb->data;
3401 
3402 		/* get the packet length */
3403 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3404 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3405 
3406 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3407 			netif_dbg(dev, rx_err, dev->net,
3408 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3409 		} else {
3410 			/* last frame in this batch */
3411 			if (skb->len == size) {
3412 				lan78xx_rx_csum_offload(dev, skb,
3413 							rx_cmd_a, rx_cmd_b);
3414 				lan78xx_rx_vlan_offload(dev, skb,
3415 							rx_cmd_a, rx_cmd_b);
3416 
3417 				skb_trim(skb, skb->len - 4); /* remove fcs */
3418 				skb->truesize = size + sizeof(struct sk_buff);
3419 
3420 				return 1;
3421 			}
3422 
3423 			skb2 = skb_clone(skb, GFP_ATOMIC);
3424 			if (unlikely(!skb2)) {
3425 				netdev_warn(dev->net, "Error allocating skb");
3426 				return 0;
3427 			}
3428 
3429 			skb2->len = size;
3430 			skb2->data = packet;
3431 			skb_set_tail_pointer(skb2, size);
3432 
3433 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3434 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3435 
3436 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3437 			skb2->truesize = size + sizeof(struct sk_buff);
3438 
3439 			lan78xx_skb_return(dev, skb2);
3440 		}
3441 
3442 		skb_pull(skb, size);
3443 
3444 		/* padding bytes before the next frame starts */
3445 		if (skb->len)
3446 			skb_pull(skb, align_count);
3447 	}
3448 
3449 	return 1;
3450 }
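
/* RX framing example: each frame in a bulk-in batch is preceded by
 * 10 bytes of metadata (rx_cmd_a, rx_cmd_b, rx_cmd_c) and followed by
 * alignment padding. For size = 60, align_count =
 * (4 - ((60 + RXW_PADDING) % 4)) % 4 = 2 pad bytes before the next
 * rx_cmd_a.
 */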
3451 
3452 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3453 {
3454 	if (!lan78xx_rx(dev, skb)) {
3455 		dev->net->stats.rx_errors++;
3456 		goto done;
3457 	}
3458 
3459 	if (skb->len) {
3460 		lan78xx_skb_return(dev, skb);
3461 		return;
3462 	}
3463 
3464 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3465 	dev->net->stats.rx_errors++;
3466 done:
3467 	skb_queue_tail(&dev->done, skb);
3468 }
3469 
3470 static void rx_complete(struct urb *urb);
3471 
3472 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3473 {
3474 	struct sk_buff *skb;
3475 	struct skb_data *entry;
3476 	unsigned long lockflags;
3477 	size_t size = dev->rx_urb_size;
3478 	int ret = 0;
3479 
3480 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3481 	if (!skb) {
3482 		usb_free_urb(urb);
3483 		return -ENOMEM;
3484 	}
3485 
3486 	entry = (struct skb_data *)skb->cb;
3487 	entry->urb = urb;
3488 	entry->dev = dev;
3489 	entry->length = 0;
3490 
3491 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3492 			  skb->data, size, rx_complete, skb);
3493 
3494 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3495 
3496 	if (netif_device_present(dev->net) &&
3497 	    netif_running(dev->net) &&
3498 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3499 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3500 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3501 		switch (ret) {
3502 		case 0:
3503 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3504 			break;
3505 		case -EPIPE:
3506 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3507 			break;
3508 		case -ENODEV:
3509 		case -ENOENT:
3510 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3511 			netif_device_detach(dev->net);
3512 			break;
3513 		case -EHOSTUNREACH:
3514 			ret = -ENOLINK;
3515 			break;
3516 		default:
3517 			netif_dbg(dev, rx_err, dev->net,
3518 				  "rx submit, %d\n", ret);
3519 			tasklet_schedule(&dev->bh);
3520 		}
3521 	} else {
3522 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3523 		ret = -ENOLINK;
3524 	}
3525 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3526 	if (ret) {
3527 		dev_kfree_skb_any(skb);
3528 		usb_free_urb(urb);
3529 	}
3530 	return ret;
3531 }
3532 
3533 static void rx_complete(struct urb *urb)
3534 {
3535 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3536 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3537 	struct lan78xx_net *dev = entry->dev;
3538 	int urb_status = urb->status;
3539 	enum skb_state state;
3540 
3541 	skb_put(skb, urb->actual_length);
3542 	state = rx_done;
3543 	entry->urb = NULL;
3544 
3545 	switch (urb_status) {
3546 	case 0:
3547 		if (skb->len < dev->net->hard_header_len) {
3548 			state = rx_cleanup;
3549 			dev->net->stats.rx_errors++;
3550 			dev->net->stats.rx_length_errors++;
3551 			netif_dbg(dev, rx_err, dev->net,
3552 				  "rx length %d\n", skb->len);
3553 		}
3554 		usb_mark_last_busy(dev->udev);
3555 		break;
3556 	case -EPIPE:
3557 		dev->net->stats.rx_errors++;
3558 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3559 		fallthrough;
3560 	case -ECONNRESET:				/* async unlink */
3561 	case -ESHUTDOWN:				/* hardware gone */
3562 		netif_dbg(dev, ifdown, dev->net,
3563 			  "rx shutdown, code %d\n", urb_status);
3564 		state = rx_cleanup;
3565 		entry->urb = urb;
3566 		urb = NULL;
3567 		break;
3568 	case -EPROTO:
3569 	case -ETIME:
3570 	case -EILSEQ:
3571 		dev->net->stats.rx_errors++;
3572 		state = rx_cleanup;
3573 		entry->urb = urb;
3574 		urb = NULL;
3575 		break;
3576 
3577 	/* data overrun ... flush fifo? */
3578 	case -EOVERFLOW:
3579 		dev->net->stats.rx_over_errors++;
3580 		fallthrough;
3581 
3582 	default:
3583 		state = rx_cleanup;
3584 		dev->net->stats.rx_errors++;
3585 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3586 		break;
3587 	}
3588 
3589 	state = defer_bh(dev, skb, &dev->rxq, state);
3590 
3591 	if (urb) {
3592 		if (netif_running(dev->net) &&
3593 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3594 		    state != unlink_start) {
3595 			rx_submit(dev, urb, GFP_ATOMIC);
3596 			return;
3597 		}
3598 		usb_free_urb(urb);
3599 	}
3600 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3601 }
3602 
3603 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3604 {
3605 	int length;
3606 	struct urb *urb = NULL;
3607 	struct skb_data *entry;
3608 	unsigned long flags;
3609 	struct sk_buff_head *tqp = &dev->txq_pend;
3610 	struct sk_buff *skb, *skb2;
3611 	int ret;
3612 	int count, pos;
3613 	int skb_totallen, pkt_cnt;
3614 
3615 	skb_totallen = 0;
3616 	pkt_cnt = 0;
3617 	count = 0;
3618 	length = 0;
3619 	spin_lock_irqsave(&tqp->lock, flags);
3620 	skb_queue_walk(tqp, skb) {
3621 		if (skb_is_gso(skb)) {
3622 			if (!skb_queue_is_first(tqp, skb)) {
3623 				/* handle previous packets first */
3624 				break;
3625 			}
3626 			count = 1;
3627 			length = skb->len - TX_OVERHEAD;
3628 			__skb_unlink(skb, tqp);
3629 			spin_unlock_irqrestore(&tqp->lock, flags);
3630 			goto gso_skb;
3631 		}
3632 
3633 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3634 			break;
3635 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3636 		pkt_cnt++;
3637 	}
3638 	spin_unlock_irqrestore(&tqp->lock, flags);
3639 
3640 	/* copy to a single skb */
3641 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3642 	if (!skb)
3643 		goto drop;
3644 
3645 	skb_put(skb, skb_totallen);
3646 
3647 	for (count = pos = 0; count < pkt_cnt; count++) {
3648 		skb2 = skb_dequeue(tqp);
3649 		if (skb2) {
3650 			length += (skb2->len - TX_OVERHEAD);
3651 			memcpy(skb->data + pos, skb2->data, skb2->len);
3652 			pos += roundup(skb2->len, sizeof(u32));
3653 			dev_kfree_skb(skb2);
3654 		}
3655 	}
3656 
3657 gso_skb:
3658 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3659 	if (!urb)
3660 		goto drop;
3661 
3662 	entry = (struct skb_data *)skb->cb;
3663 	entry->urb = urb;
3664 	entry->dev = dev;
3665 	entry->length = length;
3666 	entry->num_of_packet = count;
3667 
3668 	spin_lock_irqsave(&dev->txq.lock, flags);
3669 	ret = usb_autopm_get_interface_async(dev->intf);
3670 	if (ret < 0) {
3671 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3672 		goto drop;
3673 	}
3674 
3675 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3676 			  skb->data, skb->len, tx_complete, skb);
3677 
3678 	if (length % dev->maxpacket == 0) {
3679 		/* send USB_ZERO_PACKET */
3680 		urb->transfer_flags |= URB_ZERO_PACKET;
3681 	}
3682 
3683 #ifdef CONFIG_PM
3684 	/* if this triggers, the device is still asleep */
3685 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3686 		/* transmission will be done in resume */
3687 		usb_anchor_urb(urb, &dev->deferred);
3688 		/* no use to process more packets */
3689 		netif_stop_queue(dev->net);
3690 		usb_put_urb(urb);
3691 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3692 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3693 		return;
3694 	}
3695 #endif
3696 
3697 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3698 	switch (ret) {
3699 	case 0:
3700 		netif_trans_update(dev->net);
3701 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3702 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3703 			netif_stop_queue(dev->net);
3704 		break;
3705 	case -EPIPE:
3706 		netif_stop_queue(dev->net);
3707 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3708 		usb_autopm_put_interface_async(dev->intf);
3709 		break;
3710 	case -ENODEV:
3711 	case -ENOENT:
3712 		netif_dbg(dev, tx_err, dev->net,
3713 			  "tx: submit urb err %d (disconnected?)", ret);
3714 		netif_device_detach(dev->net);
3715 		break;
3716 	default:
3717 		usb_autopm_put_interface_async(dev->intf);
3718 		netif_dbg(dev, tx_err, dev->net,
3719 			  "tx: submit urb err %d\n", ret);
3720 		break;
3721 	}
3722 
3723 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3724 
3725 	if (ret) {
3726 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3727 drop:
3728 		dev->net->stats.tx_dropped++;
3729 		if (skb)
3730 			dev_kfree_skb_any(skb);
3731 		usb_free_urb(urb);
3732 	} else {
3733 		netif_dbg(dev, tx_queued, dev->net,
3734 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3735 	}
3736 }
3737 
3738 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3739 {
3740 	struct urb *urb;
3741 	int i;
3742 
3743 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3744 		for (i = 0; i < 10; i++) {
3745 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3746 				break;
3747 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3748 			if (urb)
3749 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3750 					return;
3751 		}
3752 
3753 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3754 			tasklet_schedule(&dev->bh);
3755 	}
3756 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3757 		netif_wake_queue(dev->net);
3758 }
3759 
3760 static void lan78xx_bh(struct tasklet_struct *t)
3761 {
3762 	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
3763 	struct sk_buff *skb;
3764 	struct skb_data *entry;
3765 
3766 	while ((skb = skb_dequeue(&dev->done))) {
3767 		entry = (struct skb_data *)(skb->cb);
3768 		switch (entry->state) {
3769 		case rx_done:
3770 			entry->state = rx_cleanup;
3771 			rx_process(dev, skb);
3772 			continue;
3773 		case tx_done:
3774 			usb_free_urb(entry->urb);
3775 			dev_kfree_skb(skb);
3776 			continue;
3777 		case rx_cleanup:
3778 			usb_free_urb(entry->urb);
3779 			dev_kfree_skb(skb);
3780 			continue;
3781 		default:
3782 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3783 			return;
3784 		}
3785 	}
3786 
3787 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3788 		/* reset update timer delta */
3789 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3790 			dev->delta = 1;
3791 			mod_timer(&dev->stat_monitor,
3792 				  jiffies + STAT_UPDATE_TIMER);
3793 		}
3794 
3795 		if (!skb_queue_empty(&dev->txq_pend))
3796 			lan78xx_tx_bh(dev);
3797 
3798 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
3799 			lan78xx_rx_bh(dev);
3800 	}
3801 }
3802 
3803 static void lan78xx_delayedwork(struct work_struct *work)
3804 {
3805 	int status;
3806 	struct lan78xx_net *dev;
3807 
3808 	dev = container_of(work, struct lan78xx_net, wq.work);
3809 
3810 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
3811 		return;
3812 
3813 	if (usb_autopm_get_interface(dev->intf) < 0)
3814 		return;
3815 
3816 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3817 		unlink_urbs(dev, &dev->txq);
3818 
3819 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3820 		if (status < 0 &&
3821 		    status != -EPIPE &&
3822 		    status != -ESHUTDOWN) {
3823 			if (netif_msg_tx_err(dev))
3824 				netdev_err(dev->net,
3825 					   "can't clear tx halt, status %d\n",
3826 					   status);
3827 		} else {
3828 			clear_bit(EVENT_TX_HALT, &dev->flags);
3829 			if (status != -ESHUTDOWN)
3830 				netif_wake_queue(dev->net);
3831 		}
3832 	}
3833 
3834 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3835 		unlink_urbs(dev, &dev->rxq);
3836 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3837 		if (status < 0 &&
3838 		    status != -EPIPE &&
3839 		    status != -ESHUTDOWN) {
3840 			if (netif_msg_rx_err(dev))
3841 				netdev_err(dev->net,
3842 					   "can't clear rx halt, status %d\n",
3843 					   status);
3844 		} else {
3845 			clear_bit(EVENT_RX_HALT, &dev->flags);
3846 			tasklet_schedule(&dev->bh);
3847 		}
3848 	}
3849 
3850 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3851 		int ret;
3852 
3853 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3854 		ret = lan78xx_link_reset(dev);
3855 		if (ret < 0)
3856 			netdev_info(dev->net,
3857 				    "link reset failed (%d)\n", ret);
3858 	}
3859 
3860 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3861 		lan78xx_update_stats(dev);
3862 
3863 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3864 
3865 		mod_timer(&dev->stat_monitor,
3866 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3867 
3868 		dev->delta = min((dev->delta * 2), 50);
3869 	}
3870 
3871 	usb_autopm_put_interface(dev->intf);
3872 }
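
/* Stats polling backoff: dev->delta doubles on every deferred update
 * (1, 2, 4, ... capped at 50), stretching the interval from
 * STAT_UPDATE_TIMER (1 s) up to 50 s on an idle link; lan78xx_bh()
 * resets delta to 1 whenever it runs with the device up, so an active
 * device is polled every second.
 */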
3873 
3874 static void intr_complete(struct urb *urb)
3875 {
3876 	struct lan78xx_net *dev = urb->context;
3877 	int status = urb->status;
3878 
3879 	switch (status) {
3880 	/* success */
3881 	case 0:
3882 		lan78xx_status(dev, urb);
3883 		break;
3884 
3885 	/* software-driven interface shutdown */
3886 	case -ENOENT:			/* urb killed */
3887 	case -ENODEV:			/* hardware gone */
3888 	case -ESHUTDOWN:		/* hardware gone */
3889 		netif_dbg(dev, ifdown, dev->net,
3890 			  "intr shutdown, code %d\n", status);
3891 		return;
3892 
3893 	/* NOTE:  not throttling like RX/TX, since this endpoint
3894 	 * already polls infrequently
3895 	 */
3896 	default:
3897 		netdev_dbg(dev->net, "intr status %d\n", status);
3898 		break;
3899 	}
3900 
3901 	if (!netif_device_present(dev->net) ||
3902 	    !netif_running(dev->net)) {
3903 		netdev_warn(dev->net, "not submitting new status URB");
3904 		return;
3905 	}
3906 
3907 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3908 	status = usb_submit_urb(urb, GFP_ATOMIC);
3909 
3910 	switch (status) {
3911 	case  0:
3912 		break;
3913 	case -ENODEV:
3914 	case -ENOENT:
3915 		netif_dbg(dev, timer, dev->net,
3916 			  "intr resubmit %d (disconnect?)\n", status);
3917 		netif_device_detach(dev->net);
3918 		break;
3919 	default:
3920 		netif_err(dev, timer, dev->net,
3921 			  "intr resubmit --> %d\n", status);
3922 		break;
3923 	}
3924 }
3925 
3926 static void lan78xx_disconnect(struct usb_interface *intf)
3927 {
3928 	struct lan78xx_net *dev;
3929 	struct usb_device *udev;
3930 	struct net_device *net;
3931 	struct phy_device *phydev;
3932 
3933 	dev = usb_get_intfdata(intf);
3934 	usb_set_intfdata(intf, NULL);
3935 	if (!dev)
3936 		return;
3937 
3938 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
3939 
3940 	udev = interface_to_usbdev(intf);
3941 	net = dev->net;
3942 
3943 	unregister_netdev(net);
3944 
3945 	cancel_delayed_work_sync(&dev->wq);
3946 
3947 	phydev = net->phydev;
3948 
3949 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3950 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3951 
3952 	phy_disconnect(phydev);
3953 
3954 	if (phy_is_pseudo_fixed_link(phydev))
3955 		fixed_phy_unregister(phydev);
3956 
3957 	usb_scuttle_anchored_urbs(&dev->deferred);
3958 
3959 	if (timer_pending(&dev->stat_monitor))
3960 		del_timer_sync(&dev->stat_monitor);
3961 
3962 	lan78xx_unbind(dev, intf);
3963 
3964 	usb_kill_urb(dev->urb_intr);
3965 	usb_free_urb(dev->urb_intr);
3966 
3967 	free_netdev(net);
3968 	usb_put_dev(udev);
3969 }
3970 
3971 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3972 {
3973 	struct lan78xx_net *dev = netdev_priv(net);
3974 
3975 	unlink_urbs(dev, &dev->txq);
3976 	tasklet_schedule(&dev->bh);
3977 }
3978 
3979 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3980 						struct net_device *netdev,
3981 						netdev_features_t features)
3982 {
3983 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3984 		features &= ~NETIF_F_GSO_MASK;
3985 
3986 	features = vlan_features_check(skb, features);
3987 	features = vxlan_features_check(skb, features);
3988 
3989 	return features;
3990 }
3991 
3992 static const struct net_device_ops lan78xx_netdev_ops = {
3993 	.ndo_open		= lan78xx_open,
3994 	.ndo_stop		= lan78xx_stop,
3995 	.ndo_start_xmit		= lan78xx_start_xmit,
3996 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3997 	.ndo_change_mtu		= lan78xx_change_mtu,
3998 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3999 	.ndo_validate_addr	= eth_validate_addr,
4000 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4001 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4002 	.ndo_set_features	= lan78xx_set_features,
4003 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4004 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4005 	.ndo_features_check	= lan78xx_features_check,
4006 };
4007 
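/* The stat_monitor timer fires in atomic context, but reading the
 * hardware counters requires USB transfers that sleep, so the update is
 * deferred to the workqueue via EVENT_STAT_UPDATE.
 */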
4008 static void lan78xx_stat_monitor(struct timer_list *t)
4009 {
4010 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4011 
4012 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4013 }
4014 
4015 static int lan78xx_probe(struct usb_interface *intf,
4016 			 const struct usb_device_id *id)
4017 {
4018 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4019 	struct lan78xx_net *dev;
4020 	struct net_device *netdev;
4021 	struct usb_device *udev;
4022 	int ret;
4023 	unsigned int maxp;
4024 	unsigned int period;
4025 	u8 *buf = NULL;
4026 
4027 	udev = interface_to_usbdev(intf);
4028 	udev = usb_get_dev(udev);
4029 
4030 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4031 	if (!netdev) {
4032 		dev_err(&intf->dev, "Error: OOM\n");
4033 		ret = -ENOMEM;
4034 		goto out1;
4035 	}
4036 
4037 	/* netdev_printk() needs this */
4038 	SET_NETDEV_DEV(netdev, &intf->dev);
4039 
4040 	dev = netdev_priv(netdev);
4041 	dev->udev = udev;
4042 	dev->intf = intf;
4043 	dev->net = netdev;
4044 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV |
4045 					 NETIF_MSG_PROBE | NETIF_MSG_LINK);
4046 
4047 	skb_queue_head_init(&dev->rxq);
4048 	skb_queue_head_init(&dev->txq);
4049 	skb_queue_head_init(&dev->done);
4050 	skb_queue_head_init(&dev->txq_pend);
4051 	mutex_init(&dev->phy_mutex);
4052 	mutex_init(&dev->dev_mutex);
4053 
4054 	tasklet_setup(&dev->bh, lan78xx_bh);
4055 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4056 	init_usb_anchor(&dev->deferred);
4057 
4058 	netdev->netdev_ops = &lan78xx_netdev_ops;
4059 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4060 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4061 
4062 	dev->delta = 1;
4063 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4064 
4065 	mutex_init(&dev->stats.access_lock);
4066 
4067 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4068 		ret = -ENODEV;
4069 		goto out2;
4070 	}
4071 
4072 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4073 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4074 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4075 		ret = -ENODEV;
4076 		goto out2;
4077 	}
4078 
4079 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4080 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4081 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4082 		ret = -ENODEV;
4083 		goto out2;
4084 	}
4085 
4086 	ep_intr = &intf->cur_altsetting->endpoint[2];
4087 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4088 		ret = -ENODEV;
4089 		goto out2;
4090 	}
4091 
4092 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4093 					usb_endpoint_num(&ep_intr->desc));
4094 
4095 	ret = lan78xx_bind(dev, intf);
4096 	if (ret < 0)
4097 		goto out2;
4098 
4099 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
4100 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
4101 
4102 	/* MTU range: 68 - 9000 */
4103 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4104 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
4105 
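	/* The status endpoint is optional: if the transfer buffer cannot
	 * be allocated, dev->urb_intr is left NULL and probing continues
	 * without the interrupt URB.
	 */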
4106 	period = ep_intr->desc.bInterval;
4107 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
4108 	buf = kmalloc(maxp, GFP_KERNEL);
4109 	if (buf) {
4110 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4111 		if (!dev->urb_intr) {
4112 			ret = -ENOMEM;
4113 			kfree(buf);
4114 			goto out3;
4115 		} else {
4116 			usb_fill_int_urb(dev->urb_intr, dev->udev,
4117 					 dev->pipe_intr, buf, maxp,
4118 					 intr_complete, dev, period);
4119 			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4120 		}
4121 	}
4122 
4123 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4124 
4125 	/* Reject broken descriptors. */
4126 	if (dev->maxpacket == 0) {
4127 		ret = -ENODEV;
4128 		goto out4;
4129 	}
4130 
4131 	/* driver requires remote-wakeup capability during autosuspend. */
4132 	intf->needs_remote_wakeup = 1;
4133 
4134 	ret = lan78xx_phy_init(dev);
4135 	if (ret < 0)
4136 		goto out4;
4137 
4138 	ret = register_netdev(netdev);
4139 	if (ret != 0) {
4140 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4141 		goto out5;
4142 	}
4143 
4144 	usb_set_intfdata(intf, dev);
4145 
4146 	ret = device_set_wakeup_enable(&udev->dev, true);
4147 
4148 	/* The USB core's default autosuspend delay of 2 seconds costs
4149 	 * more in wakeup overhead than it saves; set it to 10 seconds.
4150 	 */
4151 	pm_runtime_set_autosuspend_delay(&udev->dev,
4152 					 DEFAULT_AUTOSUSPEND_DELAY);
4153 
4154 	return 0;
4155 
4156 out5:
4157 	phy_disconnect(netdev->phydev);
4158 out4:
4159 	usb_free_urb(dev->urb_intr);
4160 out3:
4161 	lan78xx_unbind(dev, intf);
4162 out2:
4163 	free_netdev(netdev);
4164 out1:
4165 	usb_put_dev(udev);
4166 
4167 	return ret;
4168 }
4169 
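/* Bit-serial CRC-16 over a wakeup-frame pattern: each data byte enters
 * LSB first into an MSB-first shift register with polynomial 0x8005 and
 * initial value 0xFFFF.  The result is written to WUF_CFGx below, where
 * the wakeup-frame filter hardware is expected to compute the same CRC
 * over the masked frame bytes.
 */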
4170 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4171 {
4172 	const u16 crc16poly = 0x8005;
4173 	int i;
4174 	u16 bit, crc, msb;
4175 	u8 data;
4176 
4177 	crc = 0xFFFF;
4178 	for (i = 0; i < len; i++) {
4179 		data = *buf++;
4180 		for (bit = 0; bit < 8; bit++) {
4181 			msb = crc >> 15;
4182 			crc <<= 1;
4183 
4184 			if (msb ^ (u16)(data & 1)) {
4185 				crc ^= crc16poly;
4186 				crc |= (u16)0x0001U;
4187 			}
4188 			data >>= 1;
4189 		}
4190 	}
4191 
4192 	return crc;
4193 }
4194 
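/* Arm the device for USB selective (runtime) suspend: stop both data
 * paths, clear stale wake status, enable wakeup on any received good
 * frame (storing the frame for delivery on resume) and on PHY events,
 * select suspend mode 3, then restart the RX path.
 */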
4195 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4196 {
4197 	u32 buf;
4198 	int ret;
4199 
4200 	ret = lan78xx_stop_tx_path(dev);
4201 	if (ret < 0)
4202 		return ret;
4203 
4204 	ret = lan78xx_stop_rx_path(dev);
4205 	if (ret < 0)
4206 		return ret;
4207 
4208 	/* auto suspend (selective suspend) */
4209 
4210 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4211 	if (ret < 0)
4212 		return ret;
4213 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4214 	if (ret < 0)
4215 		return ret;
4216 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4217 	if (ret < 0)
4218 		return ret;
4219 
4220 	/* set goodframe wakeup */
4221 
4222 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4223 	if (ret < 0)
4224 		return ret;
4225 
4226 	buf |= WUCSR_RFE_WAKE_EN_;
4227 	buf |= WUCSR_STORE_WAKE_;
4228 
4229 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4230 	if (ret < 0)
4231 		return ret;
4232 
4233 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4234 	if (ret < 0)
4235 		return ret;
4236 
4237 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4238 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4239 	buf |= PMT_CTL_PHY_WAKE_EN_;
4240 	buf |= PMT_CTL_WOL_EN_;
4241 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4242 	buf |= PMT_CTL_SUS_MODE_3_;
4243 
4244 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4245 	if (ret < 0)
4246 		return ret;
4247 
4248 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4249 	if (ret < 0)
4250 		return ret;
4251 
4252 	buf |= PMT_CTL_WUPS_MASK_;
4253 
4254 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4255 	if (ret < 0)
4256 		return ret;
4257 
4258 	ret = lan78xx_start_rx_path(dev);
4259 
4260 	return ret;
4261 }
4262 
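/* Program the wakeup-frame filters and PMT control for suspend
 * according to the requested WAKE_* flags: magic-packet, broadcast and
 * perfect-DA wakeup map to dedicated WUCSR enables, while multicast and
 * ARP wakeup are implemented as CRC-16 pattern filters on the frame
 * prefix and EtherType respectively.
 */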
4263 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4264 {
4265 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4266 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4267 	const u8 arp_type[2] = { 0x08, 0x06 };
4268 	u32 temp_pmt_ctl;
4269 	int mask_index;
4270 	u32 temp_wucsr;
4271 	u32 buf;
4272 	u16 crc;
4273 	int ret;
4274 
4275 	ret = lan78xx_stop_tx_path(dev);
4276 	if (ret < 0)
4277 		return ret;
4278 	ret = lan78xx_stop_rx_path(dev);
4279 	if (ret < 0)
4280 		return ret;
4281 
4282 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4283 	if (ret < 0)
4284 		return ret;
4285 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4286 	if (ret < 0)
4287 		return ret;
4288 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4289 	if (ret < 0)
4290 		return ret;
4291 
4292 	temp_wucsr = 0;
4293 
4294 	temp_pmt_ctl = 0;
4295 
4296 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4297 	if (ret < 0)
4298 		return ret;
4299 
4300 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4301 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4302 
4303 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4304 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4305 		if (ret < 0)
4306 			return ret;
4307 	}
4308 
4309 	mask_index = 0;
4310 	if (wol & WAKE_PHY) {
4311 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4312 
4313 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4314 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4315 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4316 	}
4317 	if (wol & WAKE_MAGIC) {
4318 		temp_wucsr |= WUCSR_MPEN_;
4319 
4320 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4321 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4322 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4323 	}
4324 	if (wol & WAKE_BCAST) {
4325 		temp_wucsr |= WUCSR_BCST_EN_;
4326 
4327 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4328 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4329 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4330 	}
4331 	if (wol & WAKE_MCAST) {
4332 		temp_wucsr |= WUCSR_WAKE_EN_;
4333 
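		/* Each WUF_MASKn bit selects one frame byte (counting from
		 * the configured offset) for inclusion in the CRC-16
		 * comparison: mask 0x7 below covers the 3-byte multicast
		 * prefix, and mask 0x3000 in the ARP case covers the
		 * EtherType at offsets 12..13.
		 */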
4334 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4335 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4336 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4337 					WUF_CFGX_EN_ |
4338 					WUF_CFGX_TYPE_MCAST_ |
4339 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4340 					(crc & WUF_CFGX_CRC16_MASK_));
4341 		if (ret < 0)
4342 			return ret;
4343 
4344 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4345 		if (ret < 0)
4346 			return ret;
4347 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4348 		if (ret < 0)
4349 			return ret;
4350 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4351 		if (ret < 0)
4352 			return ret;
4353 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4354 		if (ret < 0)
4355 			return ret;
4356 
4357 		mask_index++;
4358 
4359 		/* for IPv6 Multicast */
4360 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4361 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4362 					WUF_CFGX_EN_ |
4363 					WUF_CFGX_TYPE_MCAST_ |
4364 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4365 					(crc & WUF_CFGX_CRC16_MASK_));
4366 		if (ret < 0)
4367 			return ret;
4368 
4369 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4370 		if (ret < 0)
4371 			return ret;
4372 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4373 		if (ret < 0)
4374 			return ret;
4375 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4376 		if (ret < 0)
4377 			return ret;
4378 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4379 		if (ret < 0)
4380 			return ret;
4381 
4382 		mask_index++;
4383 
4384 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4385 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4386 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4387 	}
4388 	if (wol & WAKE_UCAST) {
4389 		temp_wucsr |= WUCSR_PFDA_EN_;
4390 
4391 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4392 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4393 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4394 	}
4395 	if (wol & WAKE_ARP) {
4396 		temp_wucsr |= WUCSR_WAKE_EN_;
4397 
4398 		/* set WUF_CFG & WUF_MASK
4399 		 * for packettype (offset 12,13) = ARP (0x0806)
4400 		 */
4401 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4402 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4403 					WUF_CFGX_EN_ |
4404 					WUF_CFGX_TYPE_ALL_ |
4405 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4406 					(crc & WUF_CFGX_CRC16_MASK_));
4407 		if (ret < 0)
4408 			return ret;
4409 
4410 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4411 		if (ret < 0)
4412 			return ret;
4413 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4414 		if (ret < 0)
4415 			return ret;
4416 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4417 		if (ret < 0)
4418 			return ret;
4419 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4420 		if (ret < 0)
4421 			return ret;
4422 
4423 		mask_index++;
4424 
4425 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4426 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4427 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4428 	}
4429 
4430 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4431 	if (ret < 0)
4432 		return ret;
4433 
4434 	/* when multiple WOL bits are set */
4435 	if (hweight_long((unsigned long)wol) > 1) {
4436 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4437 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4438 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4439 	}
4440 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4441 	if (ret < 0)
4442 		return ret;
4443 
4444 	/* clear WUPS */
4445 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4446 	if (ret < 0)
4447 		return ret;
4448 
4449 	buf |= PMT_CTL_WUPS_MASK_;
4450 
4451 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4452 	if (ret < 0)
4453 		return ret;
4454 
4455 	ret = lan78xx_start_rx_path(dev);
4456 
4457 	return ret;
4458 }
4459 
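/* USB suspend callback.  A runtime (auto) suspend is refused with
 * -EBUSY while TX traffic is still queued and otherwise leaves the
 * device armed to wake on any received good frame; a system suspend
 * programs the user-configured WOL settings instead.
 */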
4460 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4461 {
4462 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4463 	bool dev_open;
4464 	int ret;
4465 
4466 	mutex_lock(&dev->dev_mutex);
4467 
4468 	netif_dbg(dev, ifdown, dev->net,
4469 		  "suspending: pm event %#x", message.event);
4470 
4471 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4472 
4473 	if (dev_open) {
4474 		spin_lock_irq(&dev->txq.lock);
4475 		/* don't autosuspend while transmitting */
4476 		if ((skb_queue_len(&dev->txq) ||
4477 		     skb_queue_len(&dev->txq_pend)) &&
4478 		    PMSG_IS_AUTO(message)) {
4479 			spin_unlock_irq(&dev->txq.lock);
4480 			ret = -EBUSY;
4481 			goto out;
4482 		} else {
4483 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4484 			spin_unlock_irq(&dev->txq.lock);
4485 		}
4486 
4487 		/* stop RX */
4488 		ret = lan78xx_stop_rx_path(dev);
4489 		if (ret < 0)
4490 			goto out;
4491 
4492 		ret = lan78xx_flush_rx_fifo(dev);
4493 		if (ret < 0)
4494 			goto out;
4495 
4496 		/* stop Tx */
4497 		ret = lan78xx_stop_tx_path(dev);
4498 		if (ret < 0)
4499 			goto out;
4500 
4501 		/* empty out the Rx and Tx queues */
4502 		netif_device_detach(dev->net);
4503 		lan78xx_terminate_urbs(dev);
4504 		usb_kill_urb(dev->urb_intr);
4505 
4506 		/* reattach */
4507 		netif_device_attach(dev->net);
4508 
4509 		del_timer(&dev->stat_monitor);
4510 
4511 		if (PMSG_IS_AUTO(message)) {
4512 			ret = lan78xx_set_auto_suspend(dev);
4513 			if (ret < 0)
4514 				goto out;
4515 		} else {
4516 			struct lan78xx_priv *pdata;
4517 
4518 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4519 			netif_carrier_off(dev->net);
4520 			ret = lan78xx_set_suspend(dev, pdata->wol);
4521 			if (ret < 0)
4522 				goto out;
4523 		}
4524 	} else {
4525 		/* Interface is down; don't allow WOL and PHY
4526 		 * events to wake up the host
4527 		 */
4528 		u32 buf;
4529 
4530 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4531 
4532 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4533 		if (ret < 0)
4534 			goto out;
4535 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4536 		if (ret < 0)
4537 			goto out;
4538 
4539 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4540 		if (ret < 0)
4541 			goto out;
4542 
4543 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4544 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4545 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4546 		buf |= PMT_CTL_SUS_MODE_3_;
4547 
4548 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4549 		if (ret < 0)
4550 			goto out;
4551 
4552 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4553 		if (ret < 0)
4554 			goto out;
4555 
4556 		buf |= PMT_CTL_WUPS_MASK_;
4557 
4558 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4559 		if (ret < 0)
4560 			goto out;
4561 	}
4562 
4563 	ret = 0;
4564 out:
4565 	mutex_unlock(&dev->dev_mutex);
4566 
4567 	return ret;
4568 }
4569 
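/* Resubmit TX URBs that were deferred while the device was suspended.
 * Called from lan78xx_resume() with txq.lock held; deferred URBs are
 * dropped instead of submitted once the device is gone, the link is
 * down or the OUT pipe has stalled.
 */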
4570 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4571 {
4572 	bool pipe_halted = false;
4573 	struct urb *urb;
4574 
4575 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4576 		struct sk_buff *skb = urb->context;
4577 		int ret;
4578 
4579 		if (!netif_device_present(dev->net) ||
4580 		    !netif_carrier_ok(dev->net) ||
4581 		    pipe_halted) {
4582 			usb_free_urb(urb);
4583 			dev_kfree_skb(skb);
4584 			continue;
4585 		}
4586 
4587 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4588 
4589 		if (ret == 0) {
4590 			netif_trans_update(dev->net);
4591 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4592 		} else {
4593 			usb_free_urb(urb);
4594 			dev_kfree_skb(skb);
4595 
4596 			if (ret == -EPIPE) {
4597 				netif_stop_queue(dev->net);
4598 				pipe_halted = true;
4599 			} else if (ret == -ENODEV) {
4600 				netif_device_detach(dev->net);
4601 			}
4602 		}
4603 	}
4604 
4605 	return pipe_halted;
4606 }
4607 
4608 static int lan78xx_resume(struct usb_interface *intf)
4609 {
4610 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4611 	bool dev_open;
4612 	int ret;
4613 
4614 	mutex_lock(&dev->dev_mutex);
4615 
4616 	netif_dbg(dev, ifup, dev->net, "resuming device");
4617 
4618 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4619 
4620 	if (dev_open) {
4621 		bool pipe_halted = false;
4622 
4623 		ret = lan78xx_flush_tx_fifo(dev);
4624 		if (ret < 0)
4625 			goto out;
4626 
4627 		if (dev->urb_intr) {
4628 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4629 
4630 			if (ret < 0) {
4631 				if (ret == -ENODEV)
4632 					netif_device_detach(dev->net);
4633 
4634 				netdev_warn(dev->net, "Failed to submit intr URB\n");
4635 			}
4636 		}
4637 
4638 		spin_lock_irq(&dev->txq.lock);
4639 
4640 		if (netif_device_present(dev->net)) {
4641 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4642 
4643 			if (pipe_halted)
4644 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4645 		}
4646 
4647 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4648 
4649 		spin_unlock_irq(&dev->txq.lock);
4650 
4651 		if (!pipe_halted &&
4652 		    netif_device_present(dev->net) &&
4653 		    (skb_queue_len(&dev->txq) < dev->tx_qlen))
4654 			netif_start_queue(dev->net);
4655 
4656 		ret = lan78xx_start_tx_path(dev);
4657 		if (ret < 0)
4658 			goto out;
4659 
4660 		tasklet_schedule(&dev->bh);
4661 
4662 		if (!timer_pending(&dev->stat_monitor)) {
4663 			dev->delta = 1;
4664 			mod_timer(&dev->stat_monitor,
4665 				  jiffies + STAT_UPDATE_TIMER);
4666 		}
4667 
4668 	} else {
4669 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4670 	}
4671 
4672 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4673 	if (ret < 0)
4674 		goto out;
4675 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4676 	if (ret < 0)
4677 		goto out;
4678 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4679 	if (ret < 0)
4680 		goto out;
4681 
4682 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4683 					     WUCSR2_ARP_RCD_ |
4684 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4685 					     WUCSR2_IPV4_TCPSYN_RCD_);
4686 	if (ret < 0)
4687 		goto out;
4688 
4689 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4690 					    WUCSR_EEE_RX_WAKE_ |
4691 					    WUCSR_PFDA_FR_ |
4692 					    WUCSR_RFE_WAKE_FR_ |
4693 					    WUCSR_WUFR_ |
4694 					    WUCSR_MPR_ |
4695 					    WUCSR_BCST_FR_);
4696 	if (ret < 0)
4697 		goto out;
4698 
4699 	ret = 0;
4700 out:
4701 	mutex_unlock(&dev->dev_mutex);
4702 
4703 	return ret;
4704 }
4705 
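/* Resume after the USB core has reset the device: the chip must be
 * fully re-initialised and the PHY restarted before the normal resume
 * path is run.
 */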
4706 static int lan78xx_reset_resume(struct usb_interface *intf)
4707 {
4708 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4709 	int ret;
4710 
4711 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4712 
4713 	ret = lan78xx_reset(dev);
4714 	if (ret < 0)
4715 		return ret;
4716 
4717 	phy_start(dev->net->phydev);
4718 
4719 	ret = lan78xx_resume(intf);
4720 
4721 	return ret;
4722 }
4723 
4724 static const struct usb_device_id products[] = {
4725 	{
4726 	/* LAN7800 USB Gigabit Ethernet Device */
4727 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4728 	},
4729 	{
4730 	/* LAN7850 USB Gigabit Ethernet Device */
4731 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4732 	},
4733 	{
4734 	/* LAN7801 USB Gigabit Ethernet Device */
4735 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4736 	},
4737 	{},
4738 };
4739 MODULE_DEVICE_TABLE(usb, products);
4740 
4741 static struct usb_driver lan78xx_driver = {
4742 	.name			= DRIVER_NAME,
4743 	.id_table		= products,
4744 	.probe			= lan78xx_probe,
4745 	.disconnect		= lan78xx_disconnect,
4746 	.suspend		= lan78xx_suspend,
4747 	.resume			= lan78xx_resume,
4748 	.reset_resume		= lan78xx_reset_resume,
4749 	.supports_autosuspend	= 1,
4750 	.disable_hub_initiated_lpm = 1,
4751 };
4752 
4753 module_usb_driver(lan78xx_driver);
4754 
4755 MODULE_AUTHOR(DRIVER_AUTHOR);
4756 MODULE_DESCRIPTION(DRIVER_DESC);
4757 MODULE_LICENSE("GPL");
4758