1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
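
/* A worked example of the threshold encoding above (illustrative only).
 * FLOW_THRESHOLD() converts a byte level into 512-byte units, rounding
 * up and masking to 7 bits; FLOW_CTRL_THRESHOLD() packs the "on" level
 * into bits 7:0 and the "off" level into bits 15:8. For super-speed:
 *
 *   FLOW_THRESHOLD(FLOW_ON_SS)  = ((9216 + 511) / 512) & 0x7F = 0x12
 *   FLOW_THRESHOLD(FLOW_OFF_SS) = ((4096 + 511) / 512) & 0x7F = 0x08
 *   FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS)              = 0x0812
 *
 * 0x0812 is the value lan78xx_update_flowcontrol() writes to FCT_FLOW.
 */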
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define LAN78XX_NAPI_WEIGHT		64
96 
97 #define TX_URB_NUM			10
98 #define TX_SS_URB_NUM			TX_URB_NUM
99 #define TX_HS_URB_NUM			TX_URB_NUM
100 #define TX_FS_URB_NUM			TX_URB_NUM
101 
102 /* A single URB buffer must be large enough to hold a complete jumbo packet */
104 #define TX_SS_URB_SIZE			(32 * 1024)
105 #define TX_HS_URB_SIZE			(16 * 1024)
106 #define TX_FS_URB_SIZE			(10 * 1024)
107 
108 #define RX_SS_URB_NUM			30
109 #define RX_HS_URB_NUM			10
110 #define RX_FS_URB_NUM			10
111 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
112 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
113 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
114 
115 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
116 #define SS_BULK_IN_DELAY		0x2000
117 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
118 #define HS_BULK_IN_DELAY		0x2000
119 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
120 #define FS_BULK_IN_DELAY		0x2000
121 
122 #define TX_CMD_LEN			8
123 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
124 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
125 
126 #define RX_CMD_LEN			10
127 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
128 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
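
/* A quick sizing example for the macros above, assuming the standard
 * Ethernet MTU of 1500 bytes:
 *
 *   RX_MAX_FRAME_LEN(1500) = 1500 + ETH_HLEN (14) + VLAN_HLEN (4) = 1518
 *
 * and, for a super-speed device (tx_urb_size = TX_SS_URB_SIZE):
 *
 *   LAN78XX_TSO_SIZE(dev) = 32768 - (TX_CMD_LEN (8) + ETH_HLEN (14))
 *                         = 32746 bytes of URB space per TSO burst
 */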
129 
130 /* USB related defines */
131 #define BULK_IN_PIPE			1
132 #define BULK_OUT_PIPE			2
133 
134 /* default autosuspend delay (msec) */
135 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
136 
137 /* statistic update interval (mSec) */
138 #define STAT_UPDATE_TIMER		(1 * 1000)
139 
140 /* time to wait for MAC or FCT to stop (jiffies) */
141 #define HW_DISABLE_TIMEOUT		(HZ / 10)
142 
143 /* time to wait between polling MAC or FCT state (ms) */
144 #define HW_DISABLE_DELAY_MS		1
145 
146 /* interrupt bits reported on the interrupt endpoint */
147 #define MAX_INT_EP			(32)
148 #define INT_EP_INTEP			(31)
149 #define INT_EP_OTP_WR_DONE		(28)
150 #define INT_EP_EEE_TX_LPI_START		(26)
151 #define INT_EP_EEE_TX_LPI_STOP		(25)
152 #define INT_EP_EEE_RX_LPI		(24)
153 #define INT_EP_MAC_RESET_TIMEOUT	(23)
154 #define INT_EP_RDFO			(22)
155 #define INT_EP_TXE			(21)
156 #define INT_EP_USB_STATUS		(20)
157 #define INT_EP_TX_DIS			(19)
158 #define INT_EP_RX_DIS			(18)
159 #define INT_EP_PHY			(17)
160 #define INT_EP_DP			(16)
161 #define INT_EP_MAC_ERR			(15)
162 #define INT_EP_TDFU			(14)
163 #define INT_EP_TDFO			(13)
164 #define INT_EP_UTX			(12)
165 #define INT_EP_GPIO_11			(11)
166 #define INT_EP_GPIO_10			(10)
167 #define INT_EP_GPIO_9			(9)
168 #define INT_EP_GPIO_8			(8)
169 #define INT_EP_GPIO_7			(7)
170 #define INT_EP_GPIO_6			(6)
171 #define INT_EP_GPIO_5			(5)
172 #define INT_EP_GPIO_4			(4)
173 #define INT_EP_GPIO_3			(3)
174 #define INT_EP_GPIO_2			(2)
175 #define INT_EP_GPIO_1			(1)
176 #define INT_EP_GPIO_0			(0)
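
/* These values are bit positions within the 32-bit status word
 * delivered on the interrupt endpoint. A minimal decode sketch
 * (illustrative only; the real handling lives in lan78xx_status()):
 *
 *   u32 intdata = get_unaligned_le32(urb->transfer_buffer);
 *
 *   if (intdata & BIT(INT_EP_PHY))
 *           ; // PHY interrupt pending - defer a link reset
 *   if (intdata & BIT(INT_EP_RDFO))
 *           ; // RX data FIFO overflow
 */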
177 
178 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
179 	"RX FCS Errors",
180 	"RX Alignment Errors",
181 	"Rx Fragment Errors",
182 	"RX Jabber Errors",
183 	"RX Undersize Frame Errors",
184 	"RX Oversize Frame Errors",
185 	"RX Dropped Frames",
186 	"RX Unicast Byte Count",
187 	"RX Broadcast Byte Count",
188 	"RX Multicast Byte Count",
189 	"RX Unicast Frames",
190 	"RX Broadcast Frames",
191 	"RX Multicast Frames",
192 	"RX Pause Frames",
193 	"RX 64 Byte Frames",
194 	"RX 65 - 127 Byte Frames",
195 	"RX 128 - 255 Byte Frames",
196 	"RX 256 - 511 Bytes Frames",
197 	"RX 512 - 1023 Byte Frames",
198 	"RX 1024 - 1518 Byte Frames",
199 	"RX Greater 1518 Byte Frames",
200 	"EEE RX LPI Transitions",
201 	"EEE RX LPI Time",
202 	"TX FCS Errors",
203 	"TX Excess Deferral Errors",
204 	"TX Carrier Errors",
205 	"TX Bad Byte Count",
206 	"TX Single Collisions",
207 	"TX Multiple Collisions",
208 	"TX Excessive Collision",
209 	"TX Late Collisions",
210 	"TX Unicast Byte Count",
211 	"TX Broadcast Byte Count",
212 	"TX Multicast Byte Count",
213 	"TX Unicast Frames",
214 	"TX Broadcast Frames",
215 	"TX Multicast Frames",
216 	"TX Pause Frames",
217 	"TX 64 Byte Frames",
218 	"TX 65 - 127 Byte Frames",
219 	"TX 128 - 255 Byte Frames",
220 	"TX 256 - 511 Bytes Frames",
221 	"TX 512 - 1023 Byte Frames",
222 	"TX 1024 - 1518 Byte Frames",
223 	"TX Greater 1518 Byte Frames",
224 	"EEE TX LPI Transitions",
225 	"EEE TX LPI Time",
226 };
227 
228 struct lan78xx_statstage {
229 	u32 rx_fcs_errors;
230 	u32 rx_alignment_errors;
231 	u32 rx_fragment_errors;
232 	u32 rx_jabber_errors;
233 	u32 rx_undersize_frame_errors;
234 	u32 rx_oversize_frame_errors;
235 	u32 rx_dropped_frames;
236 	u32 rx_unicast_byte_count;
237 	u32 rx_broadcast_byte_count;
238 	u32 rx_multicast_byte_count;
239 	u32 rx_unicast_frames;
240 	u32 rx_broadcast_frames;
241 	u32 rx_multicast_frames;
242 	u32 rx_pause_frames;
243 	u32 rx_64_byte_frames;
244 	u32 rx_65_127_byte_frames;
245 	u32 rx_128_255_byte_frames;
246 	u32 rx_256_511_bytes_frames;
247 	u32 rx_512_1023_byte_frames;
248 	u32 rx_1024_1518_byte_frames;
249 	u32 rx_greater_1518_byte_frames;
250 	u32 eee_rx_lpi_transitions;
251 	u32 eee_rx_lpi_time;
252 	u32 tx_fcs_errors;
253 	u32 tx_excess_deferral_errors;
254 	u32 tx_carrier_errors;
255 	u32 tx_bad_byte_count;
256 	u32 tx_single_collisions;
257 	u32 tx_multiple_collisions;
258 	u32 tx_excessive_collision;
259 	u32 tx_late_collisions;
260 	u32 tx_unicast_byte_count;
261 	u32 tx_broadcast_byte_count;
262 	u32 tx_multicast_byte_count;
263 	u32 tx_unicast_frames;
264 	u32 tx_broadcast_frames;
265 	u32 tx_multicast_frames;
266 	u32 tx_pause_frames;
267 	u32 tx_64_byte_frames;
268 	u32 tx_65_127_byte_frames;
269 	u32 tx_128_255_byte_frames;
270 	u32 tx_256_511_bytes_frames;
271 	u32 tx_512_1023_byte_frames;
272 	u32 tx_1024_1518_byte_frames;
273 	u32 tx_greater_1518_byte_frames;
274 	u32 eee_tx_lpi_transitions;
275 	u32 eee_tx_lpi_time;
276 };
277 
278 struct lan78xx_statstage64 {
279 	u64 rx_fcs_errors;
280 	u64 rx_alignment_errors;
281 	u64 rx_fragment_errors;
282 	u64 rx_jabber_errors;
283 	u64 rx_undersize_frame_errors;
284 	u64 rx_oversize_frame_errors;
285 	u64 rx_dropped_frames;
286 	u64 rx_unicast_byte_count;
287 	u64 rx_broadcast_byte_count;
288 	u64 rx_multicast_byte_count;
289 	u64 rx_unicast_frames;
290 	u64 rx_broadcast_frames;
291 	u64 rx_multicast_frames;
292 	u64 rx_pause_frames;
293 	u64 rx_64_byte_frames;
294 	u64 rx_65_127_byte_frames;
295 	u64 rx_128_255_byte_frames;
296 	u64 rx_256_511_bytes_frames;
297 	u64 rx_512_1023_byte_frames;
298 	u64 rx_1024_1518_byte_frames;
299 	u64 rx_greater_1518_byte_frames;
300 	u64 eee_rx_lpi_transitions;
301 	u64 eee_rx_lpi_time;
302 	u64 tx_fcs_errors;
303 	u64 tx_excess_deferral_errors;
304 	u64 tx_carrier_errors;
305 	u64 tx_bad_byte_count;
306 	u64 tx_single_collisions;
307 	u64 tx_multiple_collisions;
308 	u64 tx_excessive_collision;
309 	u64 tx_late_collisions;
310 	u64 tx_unicast_byte_count;
311 	u64 tx_broadcast_byte_count;
312 	u64 tx_multicast_byte_count;
313 	u64 tx_unicast_frames;
314 	u64 tx_broadcast_frames;
315 	u64 tx_multicast_frames;
316 	u64 tx_pause_frames;
317 	u64 tx_64_byte_frames;
318 	u64 tx_65_127_byte_frames;
319 	u64 tx_128_255_byte_frames;
320 	u64 tx_256_511_bytes_frames;
321 	u64 tx_512_1023_byte_frames;
322 	u64 tx_1024_1518_byte_frames;
323 	u64 tx_greater_1518_byte_frames;
324 	u64 eee_tx_lpi_transitions;
325 	u64 eee_tx_lpi_time;
326 };
327 
328 static u32 lan78xx_regs[] = {
329 	ID_REV,
330 	INT_STS,
331 	HW_CFG,
332 	PMT_CTL,
333 	E2P_CMD,
334 	E2P_DATA,
335 	USB_STATUS,
336 	VLAN_TYPE,
337 	MAC_CR,
338 	MAC_RX,
339 	MAC_TX,
340 	FLOW,
341 	ERR_STS,
342 	MII_ACC,
343 	MII_DATA,
344 	EEE_TX_LPI_REQ_DLY,
345 	EEE_TW_TX_SYS,
346 	EEE_TX_LPI_REM_DLY,
347 	WUCSR
348 };
349 
350 #define PHY_REG_SIZE (32 * sizeof(u32))
351 
352 struct lan78xx_net;
353 
354 struct lan78xx_priv {
355 	struct lan78xx_net *dev;
356 	u32 rfe_ctl;
357 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
358 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
359 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
360 	struct mutex dataport_mutex; /* for dataport access */
361 	spinlock_t rfe_ctl_lock; /* for rfe register access */
362 	struct work_struct set_multicast;
363 	struct work_struct set_vlan;
364 	u32 wol;
365 };
366 
367 enum skb_state {
368 	illegal = 0,
369 	tx_start,
370 	tx_done,
371 	rx_start,
372 	rx_done,
373 	rx_cleanup,
374 	unlink_start
375 };
376 
377 struct skb_data {		/* skb->cb is one of these */
378 	struct urb *urb;
379 	struct lan78xx_net *dev;
380 	enum skb_state state;
381 	size_t length;
382 	int num_of_packet;
383 };
384 
385 struct usb_context {
386 	struct usb_ctrlrequest req;
387 	struct lan78xx_net *dev;
388 };
389 
390 #define EVENT_TX_HALT			0
391 #define EVENT_RX_HALT			1
392 #define EVENT_RX_MEMORY			2
393 #define EVENT_STS_SPLIT			3
394 #define EVENT_LINK_RESET		4
395 #define EVENT_RX_PAUSED			5
396 #define EVENT_DEV_WAKING		6
397 #define EVENT_DEV_ASLEEP		7
398 #define EVENT_DEV_OPEN			8
399 #define EVENT_STAT_UPDATE		9
400 #define EVENT_DEV_DISCONNECT		10
401 
402 struct statstage {
403 	struct mutex			access_lock;	/* for stats access */
404 	struct lan78xx_statstage	saved;
405 	struct lan78xx_statstage	rollover_count;
406 	struct lan78xx_statstage	rollover_max;
407 	struct lan78xx_statstage64	curr_stat;
408 };
409 
410 struct irq_domain_data {
411 	struct irq_domain	*irqdomain;
412 	unsigned int		phyirq;
413 	struct irq_chip		*irqchip;
414 	irq_flow_handler_t	irq_handler;
415 	u32			irqenable;
416 	struct mutex		irq_lock;		/* for irq bus access */
417 };
418 
419 struct lan78xx_net {
420 	struct net_device	*net;
421 	struct usb_device	*udev;
422 	struct usb_interface	*intf;
423 	void			*driver_priv;
424 
425 	unsigned int		tx_pend_data_len;
426 	size_t			n_tx_urbs;
427 	size_t			n_rx_urbs;
428 	size_t			tx_urb_size;
429 	size_t			rx_urb_size;
430 
431 	struct sk_buff_head	rxq_free;
432 	struct sk_buff_head	rxq;
433 	struct sk_buff_head	rxq_done;
434 	struct sk_buff_head	rxq_overflow;
435 	struct sk_buff_head	txq_free;
436 	struct sk_buff_head	txq;
437 	struct sk_buff_head	txq_pend;
438 
439 	struct napi_struct	napi;
440 
441 	struct delayed_work	wq;
442 
443 	int			msg_enable;
444 
445 	struct urb		*urb_intr;
446 	struct usb_anchor	deferred;
447 
448 	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
449 	struct mutex		phy_mutex; /* for phy access */
450 	unsigned int		pipe_in, pipe_out, pipe_intr;
451 
452 	unsigned int		bulk_in_delay;
453 	unsigned int		burst_cap;
454 
455 	unsigned long		flags;
456 
457 	wait_queue_head_t	*wait;
458 	unsigned char		suspend_count;
459 
460 	unsigned int		maxpacket;
461 	struct timer_list	stat_monitor;
462 
463 	unsigned long		data[5];
464 
465 	int			link_on;
466 	u8			mdix_ctrl;
467 
468 	u32			chipid;
469 	u32			chiprev;
470 	struct mii_bus		*mdiobus;
471 	phy_interface_t		interface;
472 
473 	int			fc_autoneg;
474 	u8			fc_request_control;
475 
476 	int			delta;
477 	struct statstage	stats;
478 
479 	struct irq_domain_data	domain_data;
480 };
481 
482 /* define external phy id */
483 #define	PHY_LAN8835			(0x0007C130)
484 #define	PHY_KSZ9031RNX			(0x00221620)
485 
486 /* use ethtool to change the level for any given device */
487 static int msg_level = -1;
488 module_param(msg_level, int, 0);
489 MODULE_PARM_DESC(msg_level, "Override default message level");
490 
491 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
492 {
493 	if (skb_queue_empty(buf_pool))
494 		return NULL;
495 
496 	return skb_dequeue(buf_pool);
497 }
498 
499 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
500 				struct sk_buff *buf)
501 {
502 	buf->data = buf->head;
503 	skb_reset_tail_pointer(buf);
504 
505 	buf->len = 0;
506 	buf->data_len = 0;
507 
508 	skb_queue_tail(buf_pool, buf);
509 }
510 
511 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
512 {
513 	struct skb_data *entry;
514 	struct sk_buff *buf;
515 
516 	while (!skb_queue_empty(buf_pool)) {
517 		buf = skb_dequeue(buf_pool);
518 		if (buf) {
519 			entry = (struct skb_data *)buf->cb;
520 			usb_free_urb(entry->urb);
521 			dev_kfree_skb_any(buf);
522 		}
523 	}
524 }
525 
526 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
527 				  size_t n_urbs, size_t urb_size,
528 				  struct lan78xx_net *dev)
529 {
530 	struct skb_data *entry;
531 	struct sk_buff *buf;
532 	struct urb *urb;
533 	int i;
534 
535 	skb_queue_head_init(buf_pool);
536 
537 	for (i = 0; i < n_urbs; i++) {
538 		buf = alloc_skb(urb_size, GFP_ATOMIC);
539 		if (!buf)
540 			goto error;
541 
542 		if (skb_linearize(buf) != 0) {
543 			dev_kfree_skb_any(buf);
544 			goto error;
545 		}
546 
547 		urb = usb_alloc_urb(0, GFP_ATOMIC);
548 		if (!urb) {
549 			dev_kfree_skb_any(buf);
550 			goto error;
551 		}
552 
553 		entry = (struct skb_data *)buf->cb;
554 		entry->urb = urb;
555 		entry->dev = dev;
556 		entry->length = 0;
557 		entry->num_of_packet = 0;
558 
559 		skb_queue_tail(buf_pool, buf);
560 	}
561 
562 	return 0;
563 
564 error:
565 	lan78xx_free_buf_pool(buf_pool);
566 
567 	return -ENOMEM;
568 }
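
/* A sketch of the pool lifecycle implemented above, using the RX
 * wrappers below for illustration:
 *
 *   lan78xx_alloc_buf_pool(&dev->rxq_free, n, size, dev); // fill pool
 *   skb = lan78xx_get_buf(&dev->rxq_free);                // borrow
 *   ... submit the skb's URB, handle completion ...
 *   lan78xx_release_buf(&dev->rxq_free, skb);             // recycle
 *   lan78xx_free_buf_pool(&dev->rxq_free);                // teardown
 *
 * lan78xx_release_buf() rewinds data/tail/len so a recycled skb looks
 * freshly allocated to its next user.
 */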
569 
570 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
571 {
572 	return lan78xx_get_buf(&dev->rxq_free);
573 }
574 
575 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
576 				   struct sk_buff *rx_buf)
577 {
578 	lan78xx_release_buf(&dev->rxq_free, rx_buf);
579 }
580 
581 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
582 {
583 	lan78xx_free_buf_pool(&dev->rxq_free);
584 }
585 
586 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
587 {
588 	return lan78xx_alloc_buf_pool(&dev->rxq_free,
589 				      dev->n_rx_urbs, dev->rx_urb_size, dev);
590 }
591 
592 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
593 {
594 	return lan78xx_get_buf(&dev->txq_free);
595 }
596 
597 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
598 				   struct sk_buff *tx_buf)
599 {
600 	lan78xx_release_buf(&dev->txq_free, tx_buf);
601 }
602 
603 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
604 {
605 	lan78xx_free_buf_pool(&dev->txq_free);
606 }
607 
608 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
609 {
610 	return lan78xx_alloc_buf_pool(&dev->txq_free,
611 				      dev->n_tx_urbs, dev->tx_urb_size, dev);
612 }
613 
614 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
615 {
616 	u32 *buf;
617 	int ret;
618 
619 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
620 		return -ENODEV;
621 
622 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
623 	if (!buf)
624 		return -ENOMEM;
625 
626 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
627 			      USB_VENDOR_REQUEST_READ_REGISTER,
628 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
629 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
630 	if (likely(ret >= 0)) {
631 		le32_to_cpus(buf);
632 		*data = *buf;
633 	} else if (net_ratelimit()) {
634 		netdev_warn(dev->net,
635 			    "Failed to read register index 0x%08x. ret = %d",
636 			    index, ret);
637 	}
638 
639 	kfree(buf);
640 
641 	return ret;
642 }
643 
644 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
645 {
646 	u32 *buf;
647 	int ret;
648 
649 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
650 		return -ENODEV;
651 
652 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
653 	if (!buf)
654 		return -ENOMEM;
655 
656 	*buf = data;
657 	cpu_to_le32s(buf);
658 
659 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
660 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
661 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
662 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
663 	if (unlikely(ret < 0) &&
664 	    net_ratelimit()) {
665 		netdev_warn(dev->net,
666 			    "Failed to write register index 0x%08x. ret = %d",
667 			    index, ret);
668 	}
669 
670 	kfree(buf);
671 
672 	return ret;
673 }
674 
675 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
676 			      u32 data)
677 {
678 	int ret;
679 	u32 buf;
680 
681 	ret = lan78xx_read_reg(dev, reg, &buf);
682 	if (ret < 0)
683 		return ret;
684 
685 	buf &= ~mask;
686 	buf |= (mask & data);
687 
688 	ret = lan78xx_write_reg(dev, reg, buf);
689 	if (ret < 0)
690 		return ret;
691 
692 	return 0;
693 }
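
/* lan78xx_update_reg() is a read-modify-write helper: only the bits
 * set in @mask are replaced by the corresponding bits of @data. For
 * example (illustrative only), clearing just the EEE enable bit while
 * preserving the rest of MAC_CR:
 *
 *   ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, 0);
 */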
694 
695 static int lan78xx_read_stats(struct lan78xx_net *dev,
696 			      struct lan78xx_statstage *data)
697 {
698 	int ret = 0;
699 	int i;
700 	struct lan78xx_statstage *stats;
701 	u32 *src;
702 	u32 *dst;
703 
704 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
705 	if (!stats)
706 		return -ENOMEM;
707 
708 	ret = usb_control_msg(dev->udev,
709 			      usb_rcvctrlpipe(dev->udev, 0),
710 			      USB_VENDOR_REQUEST_GET_STATS,
711 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
712 			      0,
713 			      0,
714 			      (void *)stats,
715 			      sizeof(*stats),
716 			      USB_CTRL_GET_TIMEOUT);
717 	if (likely(ret >= 0)) {
718 		src = (u32 *)stats;
719 		dst = (u32 *)data;
720 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
721 			le32_to_cpus(&src[i]);
722 			dst[i] = src[i];
723 		}
724 	} else {
725 		netdev_warn(dev->net,
726 			    "Failed to read stats. ret = %d", ret);
727 	}
728 
729 	kfree(stats);
730 
731 	return ret;
732 }
733 
734 #define check_counter_rollover(struct1, dev_stats, member)		\
735 	do {								\
736 		if ((struct1)->member < (dev_stats).saved.member)	\
737 			(dev_stats).rollover_count.member++;		\
738 	} while (0)
739 
740 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
741 					struct lan78xx_statstage *stats)
742 {
743 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
744 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
745 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
746 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
747 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
748 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
749 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
750 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
751 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
752 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
753 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
754 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
755 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
756 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
757 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
758 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
759 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
760 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
761 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
762 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
763 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
764 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
765 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
766 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
767 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
768 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
769 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
770 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
771 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
772 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
773 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
774 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
775 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
776 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
777 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
778 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
779 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
780 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
781 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
782 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
783 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
784 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
785 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
786 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
787 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
788 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
789 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
790 
791 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
792 }
793 
794 static void lan78xx_update_stats(struct lan78xx_net *dev)
795 {
796 	u32 *p, *count, *max;
797 	u64 *data;
798 	int i;
799 	struct lan78xx_statstage lan78xx_stats;
800 
801 	if (usb_autopm_get_interface(dev->intf) < 0)
802 		return;
803 
804 	p = (u32 *)&lan78xx_stats;
805 	count = (u32 *)&dev->stats.rollover_count;
806 	max = (u32 *)&dev->stats.rollover_max;
807 	data = (u64 *)&dev->stats.curr_stat;
808 
809 	mutex_lock(&dev->stats.access_lock);
810 
811 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
812 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
813 
814 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
815 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
816 
817 	mutex_unlock(&dev->stats.access_lock);
818 
819 	usb_autopm_put_interface(dev->intf);
820 }
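
/* The hardware counters are 32 bits wide and wrap; the loop above
 * widens them using the rollover bookkeeping. Assuming a rollover_max
 * of 0xFFFFFFFF for a counter that has wrapped twice
 * (rollover_count = 2) and currently reads 7:
 *
 *   curr_stat = 7 + 2 * (0xFFFFFFFFULL + 1) = 0x200000007
 *
 * giving the monotonically increasing 64-bit value reported to ethtool.
 */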
821 
822 /* Loop until the read completes or times out; called with phy_mutex held */
823 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
824 {
825 	unsigned long start_time = jiffies;
826 	u32 val;
827 	int ret;
828 
829 	do {
830 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
831 		if (unlikely(ret < 0))
832 			return -EIO;
833 
834 		if (!(val & MII_ACC_MII_BUSY_))
835 			return 0;
836 	} while (!time_after(jiffies, start_time + HZ));
837 
838 	return -EIO;
839 }
840 
841 static inline u32 mii_access(int id, int index, int read)
842 {
843 	u32 ret;
844 
845 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
846 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
847 	if (read)
848 		ret |= MII_ACC_MII_READ_;
849 	else
850 		ret |= MII_ACC_MII_WRITE_;
851 	ret |= MII_ACC_MII_BUSY_;
852 
853 	return ret;
854 }
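
/* A worked example of the MII_ACC encoding above, assuming PHY
 * address 1 and register MII_BMSR (0x01):
 *
 *   mii_access(1, MII_BMSR, MII_READ)
 *     == (1 << MII_ACC_PHY_ADDR_SHIFT_) | (1 << MII_ACC_MIIRINDA_SHIFT_)
 *        | MII_ACC_MII_READ_ | MII_ACC_MII_BUSY_
 *
 * The busy bit both starts the transaction and, when polled via
 * lan78xx_phy_wait_not_busy(), signals its completion.
 */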
855 
856 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
857 {
858 	unsigned long start_time = jiffies;
859 	u32 val;
860 	int ret;
861 
862 	do {
863 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
864 		if (unlikely(ret < 0))
865 			return -EIO;
866 
867 		if (!(val & E2P_CMD_EPC_BUSY_) ||
868 		    (val & E2P_CMD_EPC_TIMEOUT_))
869 			break;
870 		usleep_range(40, 100);
871 	} while (!time_after(jiffies, start_time + HZ));
872 
873 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
874 		netdev_warn(dev->net, "EEPROM operation timed out");
875 		return -EIO;
876 	}
877 
878 	return 0;
879 }
880 
881 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
882 {
883 	unsigned long start_time = jiffies;
884 	u32 val;
885 	int ret;
886 
887 	do {
888 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
889 		if (unlikely(ret < 0))
890 			return -EIO;
891 
892 		if (!(val & E2P_CMD_EPC_BUSY_))
893 			return 0;
894 
895 		usleep_range(40, 100);
896 	} while (!time_after(jiffies, start_time + HZ));
897 
898 	netdev_warn(dev->net, "EEPROM is busy");
899 	return -EIO;
900 }
901 
902 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
903 				   u32 length, u8 *data)
904 {
905 	u32 val;
906 	u32 saved;
907 	int i, ret;
908 	int retval;
909 
910 	/* On some chips, EEPROM pins are muxed with the LED function.
911 	 * Disable the LED function to access the EEPROM, then restore it.
912 	 */
913 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
914 	saved = val;
915 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
916 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
917 		ret = lan78xx_write_reg(dev, HW_CFG, val);
918 	}
919 
920 	retval = lan78xx_eeprom_confirm_not_busy(dev);
921 	if (retval)
922 		goto exit;
923 
924 	for (i = 0; i < length; i++) {
925 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
926 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
927 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
928 		if (unlikely(ret < 0)) {
929 			retval = -EIO;
930 			goto exit;
931 		}
932 
933 		retval = lan78xx_wait_eeprom(dev);
934 		if (retval < 0)
935 			goto exit;
936 
937 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
938 		if (unlikely(ret < 0)) {
939 			retval = -EIO;
940 			goto exit;
941 		}
942 
943 		data[i] = val & 0xFF;
944 		offset++;
945 	}
946 
947 	retval = 0;
948 exit:
949 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
950 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
951 
952 	return retval;
953 }
954 
955 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
956 			       u32 length, u8 *data)
957 {
958 	u8 sig;
959 	int ret;
960 
961 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
962 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
963 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
964 	else
965 		ret = -EINVAL;
966 
967 	return ret;
968 }
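
/* A typical use of the helper above (illustrative only): fetch the
 * factory MAC address from its fixed EEPROM offset once the
 * EEPROM_INDICATOR signature check has passed:
 *
 *   u8 addr[ETH_ALEN];
 *
 *   if (lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0 &&
 *       is_valid_ether_addr(addr))
 *           ; // use addr - see lan78xx_init_mac_address()
 */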
969 
970 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
971 				    u32 length, u8 *data)
972 {
973 	u32 val;
974 	u32 saved;
975 	int i, ret;
976 	int retval;
977 
978 	/* On some chips, EEPROM pins are muxed with the LED function.
979 	 * Disable the LED function to access the EEPROM, then restore it.
980 	 */
981 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
982 	saved = val;
983 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
984 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
985 		ret = lan78xx_write_reg(dev, HW_CFG, val);
986 	}
987 
988 	retval = lan78xx_eeprom_confirm_not_busy(dev);
989 	if (retval)
990 		goto exit;
991 
992 	/* Issue write/erase enable command */
993 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
994 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
995 	if (unlikely(ret < 0)) {
996 		retval = -EIO;
997 		goto exit;
998 	}
999 
1000 	retval = lan78xx_wait_eeprom(dev);
1001 	if (retval < 0)
1002 		goto exit;
1003 
1004 	for (i = 0; i < length; i++) {
1005 		/* Fill data register */
1006 		val = data[i];
1007 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1008 		if (ret < 0) {
1009 			retval = -EIO;
1010 			goto exit;
1011 		}
1012 
1013 		/* Send "write" command */
1014 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1015 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1016 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1017 		if (ret < 0) {
1018 			retval = -EIO;
1019 			goto exit;
1020 		}
1021 
1022 		retval = lan78xx_wait_eeprom(dev);
1023 		if (retval < 0)
1024 			goto exit;
1025 
1026 		offset++;
1027 	}
1028 
1029 	retval = 0;
1030 exit:
1031 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1032 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
1033 
1034 	return retval;
1035 }
1036 
1037 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1038 				u32 length, u8 *data)
1039 {
1040 	int i;
1041 	u32 buf;
1042 	unsigned long timeout;
1043 
1044 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1045 
1046 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1047 		/* clear it and wait to be cleared */
1048 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1049 
1050 		timeout = jiffies + HZ;
1051 		do {
1052 			usleep_range(1, 10);
1053 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1054 			if (time_after(jiffies, timeout)) {
1055 				netdev_warn(dev->net,
1056 					    "timeout on OTP_PWR_DN");
1057 				return -EIO;
1058 			}
1059 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1060 	}
1061 
1062 	for (i = 0; i < length; i++) {
1063 		lan78xx_write_reg(dev, OTP_ADDR1,
1064 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1065 		lan78xx_write_reg(dev, OTP_ADDR2,
1066 				  ((offset + i) & OTP_ADDR2_10_3));
1067 
1068 		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1069 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1070 
1071 		timeout = jiffies + HZ;
1072 		do {
1073 			udelay(1);
1074 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1075 			if (time_after(jiffies, timeout)) {
1076 				netdev_warn(dev->net,
1077 					    "timeout on OTP_STATUS");
1078 				return -EIO;
1079 			}
1080 		} while (buf & OTP_STATUS_BUSY_);
1081 
1082 		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1083 
1084 		data[i] = (u8)(buf & 0xFF);
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1091 				 u32 length, u8 *data)
1092 {
1093 	int i;
1094 	u32 buf;
1095 	unsigned long timeout;
1096 
1097 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1098 
1099 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1100 		/* clear it and wait to be cleared */
1101 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1102 
1103 		timeout = jiffies + HZ;
1104 		do {
1105 			udelay(1);
1106 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1107 			if (time_after(jiffies, timeout)) {
1108 				netdev_warn(dev->net,
1109 					    "timeout on OTP_PWR_DN completion");
1110 				return -EIO;
1111 			}
1112 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1113 	}
1114 
1115 	/* set to BYTE program mode */
1116 	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1117 
1118 	for (i = 0; i < length; i++) {
1119 		lan78xx_write_reg(dev, OTP_ADDR1,
1120 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1121 		lan78xx_write_reg(dev, OTP_ADDR2,
1122 				  ((offset + i) & OTP_ADDR2_10_3));
1123 		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1124 		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1125 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1126 
1127 		timeout = jiffies + HZ;
1128 		do {
1129 			udelay(1);
1130 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1131 			if (time_after(jiffies, timeout)) {
1132 				netdev_warn(dev->net,
1133 					    "Timeout on OTP_STATUS completion");
1134 				return -EIO;
1135 			}
1136 		} while (buf & OTP_STATUS_BUSY_);
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1143 			    u32 length, u8 *data)
1144 {
1145 	u8 sig;
1146 	int ret;
1147 
1148 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1149 
1150 	if (ret == 0) {
1151 		if (sig == OTP_INDICATOR_2)
1152 			offset += 0x100;
1153 		else if (sig != OTP_INDICATOR_1)
1154 			ret = -EINVAL;
1155 		if (!ret)
1156 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1157 	}
1158 
1159 	return ret;
1160 }
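
/* The OTP appears to hold two images: OTP_INDICATOR_1 (0xF3) marks the
 * first, and OTP_INDICATOR_2 (0xF7) redirects reads to a second image
 * at offset + 0x100, which is why the helper above adjusts the offset
 * before delegating to lan78xx_read_raw_otp(). A typical use
 * (illustrative only), mirroring lan78xx_init_mac_address():
 *
 *   u8 addr[ETH_ALEN];
 *
 *   lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr);
 */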
1161 
1162 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1163 {
1164 	int i, ret;
1165 
1166 	for (i = 0; i < 100; i++) {
1167 		u32 dp_sel;
1168 
1169 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1170 		if (unlikely(ret < 0))
1171 			return -EIO;
1172 
1173 		if (dp_sel & DP_SEL_DPRDY_)
1174 			return 0;
1175 
1176 		usleep_range(40, 100);
1177 	}
1178 
1179 	netdev_warn(dev->net, "%s timed out", __func__);
1180 
1181 	return -EIO;
1182 }
1183 
1184 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1185 				  u32 addr, u32 length, u32 *buf)
1186 {
1187 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1188 	u32 dp_sel;
1189 	int i, ret;
1190 
1191 	ret = usb_autopm_get_interface(dev->intf);
1192 	if (ret < 0)
1193 		return ret;
1193 
1194 	mutex_lock(&pdata->dataport_mutex);
1195 
1196 	ret = lan78xx_dataport_wait_not_busy(dev);
1197 	if (ret < 0)
1198 		goto done;
1199 
1200 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1201 
1202 	dp_sel &= ~DP_SEL_RSEL_MASK_;
1203 	dp_sel |= ram_select;
1204 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1205 
1206 	for (i = 0; i < length; i++) {
1207 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1208 
1209 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1210 
1211 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1212 
1213 		ret = lan78xx_dataport_wait_not_busy(dev);
1214 		if (ret < 0)
1215 			goto done;
1216 	}
1217 
1218 done:
1219 	mutex_unlock(&pdata->dataport_mutex);
1220 	usb_autopm_put_interface(dev->intf);
1221 
1222 	return ret;
1223 }
1224 
1225 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1226 				    int index, u8 addr[ETH_ALEN])
1227 {
1228 	u32 temp;
1229 
1230 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1231 		temp = addr[3];
1232 		temp = addr[2] | (temp << 8);
1233 		temp = addr[1] | (temp << 8);
1234 		temp = addr[0] | (temp << 8);
1235 		pdata->pfilter_table[index][1] = temp;
1236 		temp = addr[5];
1237 		temp = addr[4] | (temp << 8);
1238 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1239 		pdata->pfilter_table[index][0] = temp;
1240 	}
1241 }
1242 
1243 /* returns hash bit number for given MAC address */
1244 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1245 {
1246 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1247 }
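
/* lan78xx_hash() yields a bit number in 0..511 (9 bits of the CRC).
 * The multicast path below records it in the 512-bit VHF hash table:
 *
 *   u32 bitnum = lan78xx_hash(ha->addr);
 *
 *   pdata->mchash_table[bitnum / 32] |= 1 << (bitnum % 32);
 */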
1248 
1249 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1250 {
1251 	struct lan78xx_priv *pdata =
1252 			container_of(param, struct lan78xx_priv, set_multicast);
1253 	struct lan78xx_net *dev = pdata->dev;
1254 	int i;
1255 
1256 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1257 		  pdata->rfe_ctl);
1258 
1259 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1260 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1261 
1262 	for (i = 1; i < NUM_OF_MAF; i++) {
1263 		lan78xx_write_reg(dev, MAF_HI(i), 0);
1264 		lan78xx_write_reg(dev, MAF_LO(i),
1265 				  pdata->pfilter_table[i][1]);
1266 		lan78xx_write_reg(dev, MAF_HI(i),
1267 				  pdata->pfilter_table[i][0]);
1268 	}
1269 
1270 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1271 }
1272 
1273 static void lan78xx_set_multicast(struct net_device *netdev)
1274 {
1275 	struct lan78xx_net *dev = netdev_priv(netdev);
1276 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1277 	unsigned long flags;
1278 	int i;
1279 
1280 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1281 
1282 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1283 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1284 
1285 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1286 		pdata->mchash_table[i] = 0;
1287 
1288 	/* pfilter_table[0] holds the device's own HW address */
1289 	for (i = 1; i < NUM_OF_MAF; i++) {
1290 		pdata->pfilter_table[i][0] = 0;
1291 		pdata->pfilter_table[i][1] = 0;
1292 	}
1293 
1294 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1295 
1296 	if (dev->net->flags & IFF_PROMISC) {
1297 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1298 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1299 	} else {
1300 		if (dev->net->flags & IFF_ALLMULTI) {
1301 			netif_dbg(dev, drv, dev->net,
1302 				  "receive all multicast enabled");
1303 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1304 		}
1305 	}
1306 
1307 	if (netdev_mc_count(dev->net)) {
1308 		struct netdev_hw_addr *ha;
1309 		int i;
1310 
1311 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1312 
1313 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1314 
1315 		i = 1;
1316 		netdev_for_each_mc_addr(ha, netdev) {
1317 			/* set first 32 into Perfect Filter */
1318 			if (i < 33) {
1319 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1320 			} else {
1321 				u32 bitnum = lan78xx_hash(ha->addr);
1322 
1323 				pdata->mchash_table[bitnum / 32] |=
1324 							(1 << (bitnum % 32));
1325 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1326 			}
1327 			i++;
1328 		}
1329 	}
1330 
1331 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1332 
1333 	/* defer register writes to a sleepable context */
1334 	schedule_work(&pdata->set_multicast);
1335 }
1336 
1337 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1338 				      u16 lcladv, u16 rmtadv)
1339 {
1340 	u32 flow = 0, fct_flow = 0;
1341 	u8 cap;
1342 
1343 	if (dev->fc_autoneg)
1344 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1345 	else
1346 		cap = dev->fc_request_control;
1347 
1348 	if (cap & FLOW_CTRL_TX)
1349 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1350 
1351 	if (cap & FLOW_CTRL_RX)
1352 		flow |= FLOW_CR_RX_FCEN_;
1353 
1354 	if (dev->udev->speed == USB_SPEED_SUPER)
1355 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1356 	else if (dev->udev->speed == USB_SPEED_HIGH)
1357 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1358 
1359 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1360 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1361 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1362 
1363 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1364 
1365 	/* threshold values must be set before enabling flow control */
1366 	lan78xx_write_reg(dev, FLOW, flow);
1367 
1368 	return 0;
1369 }
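
/* When fc_autoneg is set, the pause capability is resolved from the
 * two advertisement words. For example, if both link partners
 * advertise symmetric pause (ADVERTISE_PAUSE_CAP in lcladv and
 * rmtadv), mii_resolve_flowctrl_fdx() returns
 * FLOW_CTRL_TX | FLOW_CTRL_RX and both directions are enabled above.
 */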
1370 
1371 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1372 
1373 static int lan78xx_mac_reset(struct lan78xx_net *dev)
1374 {
1375 	unsigned long start_time = jiffies;
1376 	u32 val;
1377 	int ret;
1378 
1379 	mutex_lock(&dev->phy_mutex);
1380 
1381 	/* Resetting the device while there is activity on the MDIO
1382 	 * bus can result in the MAC interface locking up and not
1383 	 * completing register access transactions.
1384 	 */
1385 	ret = lan78xx_phy_wait_not_busy(dev);
1386 	if (ret < 0)
1387 		goto done;
1388 
1389 	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1390 	if (ret < 0)
1391 		goto done;
1392 
1393 	val |= MAC_CR_RST_;
1394 	ret = lan78xx_write_reg(dev, MAC_CR, val);
1395 	if (ret < 0)
1396 		goto done;
1397 
1398 	/* Wait for the reset to complete before allowing any further
1399 	 * MAC register accesses; otherwise the MAC may lock up.
1400 	 */
1401 	do {
1402 		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1403 		if (ret < 0)
1404 			goto done;
1405 
1406 		if (!(val & MAC_CR_RST_)) {
1407 			ret = 0;
1408 			goto done;
1409 		}
1410 	} while (!time_after(jiffies, start_time + HZ));
1411 
1412 	ret = -ETIMEDOUT;
1413 done:
1414 	mutex_unlock(&dev->phy_mutex);
1415 
1416 	return ret;
1417 }
1418 
1419 static int lan78xx_link_reset(struct lan78xx_net *dev)
1420 {
1421 	struct phy_device *phydev = dev->net->phydev;
1422 	struct ethtool_link_ksettings ecmd;
1423 	int ladv, radv, ret, link;
1424 	u32 buf;
1425 
1426 	/* clear LAN78xx interrupt status */
1427 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1428 	if (unlikely(ret < 0))
1429 		return ret;
1430 
1431 	mutex_lock(&phydev->lock);
1432 	phy_read_status(phydev);
1433 	link = phydev->link;
1434 	mutex_unlock(&phydev->lock);
1435 
1436 	if (!link && dev->link_on) {
1437 		dev->link_on = false;
1438 
1439 		/* reset MAC */
1440 		ret = lan78xx_mac_reset(dev);
1441 		if (ret < 0)
1442 			return ret;
1443 
1444 		del_timer(&dev->stat_monitor);
1445 	} else if (link && !dev->link_on) {
1446 		dev->link_on = true;
1447 
1448 		phy_ethtool_ksettings_get(phydev, &ecmd);
1449 
1450 		if (dev->udev->speed == USB_SPEED_SUPER) {
1451 			if (ecmd.base.speed == 1000) {
1452 				/* disable U2 */
1453 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1454 				if (ret < 0)
1455 					return ret;
1456 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1457 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1458 				if (ret < 0)
1459 					return ret;
1460 				/* enable U1 */
1461 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1462 				if (ret < 0)
1463 					return ret;
1464 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1465 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1466 				if (ret < 0)
1467 					return ret;
1468 			} else {
1469 				/* enable U1 & U2 */
1470 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1471 				if (ret < 0)
1472 					return ret;
1473 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1474 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1475 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1476 				if (ret < 0)
1477 					return ret;
1478 			}
1479 		}
1480 
1481 		ladv = phy_read(phydev, MII_ADVERTISE);
1482 		if (ladv < 0)
1483 			return ladv;
1484 
1485 		radv = phy_read(phydev, MII_LPA);
1486 		if (radv < 0)
1487 			return radv;
1488 
1489 		netif_dbg(dev, link, dev->net,
1490 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1491 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1492 
1493 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1494 						 radv);
1495 		if (ret < 0)
1496 			return ret;
1497 
1498 		if (!timer_pending(&dev->stat_monitor)) {
1499 			dev->delta = 1;
1500 			mod_timer(&dev->stat_monitor,
1501 				  jiffies + STAT_UPDATE_TIMER);
1502 		}
1503 
1504 		lan78xx_rx_urb_submit_all(dev);
1505 
1506 		napi_schedule(&dev->napi);
1507 	}
1508 
1509 	return 0;
1510 }
1511 
1512 /* Some work can't be done in tasklets, so we use keventd.
1513  *
1514  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1515  * but tasklet_schedule() doesn't. Hope the failure is rare.
1516  */
1517 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1518 {
1519 	set_bit(work, &dev->flags);
1520 	if (!schedule_delayed_work(&dev->wq, 0))
1521 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1522 }
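
/* This is how URB completion handlers request work that needs process
 * context, e.g. (illustrative only):
 *
 *   lan78xx_defer_kevent(dev, EVENT_RX_HALT);
 *
 * The event bit is picked up later by the delayed-work handler, which
 * may sleep while it recovers the endpoint.
 */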
1523 
1524 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1525 {
1526 	u32 intdata;
1527 
1528 	if (urb->actual_length != 4) {
1529 		netdev_warn(dev->net,
1530 			    "unexpected urb length %d", urb->actual_length);
1531 		return;
1532 	}
1533 
1534 	intdata = get_unaligned_le32(urb->transfer_buffer);
1535 
1536 	if (intdata & INT_ENP_PHY_INT) {
1537 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1538 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1539 
1540 		if (dev->domain_data.phyirq > 0)
1541 			generic_handle_irq_safe(dev->domain_data.phyirq);
1542 	} else {
1543 		netdev_warn(dev->net,
1544 			    "unexpected interrupt: 0x%08x\n", intdata);
1545 	}
1546 }
1547 
1548 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1549 {
1550 	return MAX_EEPROM_SIZE;
1551 }
1552 
1553 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1554 				      struct ethtool_eeprom *ee, u8 *data)
1555 {
1556 	struct lan78xx_net *dev = netdev_priv(netdev);
1557 	int ret;
1558 
1559 	ret = usb_autopm_get_interface(dev->intf);
1560 	if (ret)
1561 		return ret;
1562 
1563 	ee->magic = LAN78XX_EEPROM_MAGIC;
1564 
1565 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1566 
1567 	usb_autopm_put_interface(dev->intf);
1568 
1569 	return ret;
1570 }
1571 
1572 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1573 				      struct ethtool_eeprom *ee, u8 *data)
1574 {
1575 	struct lan78xx_net *dev = netdev_priv(netdev);
1576 	int ret;
1577 
1578 	ret = usb_autopm_get_interface(dev->intf);
1579 	if (ret)
1580 		return ret;
1581 
1582 	/* An invalid EEPROM_INDICATOR at offset zero will cause the
1583 	 * data load from EEPROM to fail.
1584 	 */
1585 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1586 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1587 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1588 		 (ee->offset == 0) &&
1589 		 (ee->len == 512) &&
1590 		 (data[0] == OTP_INDICATOR_1))
1591 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1592 	else
1593 		ret = -EINVAL;
1592 
1593 	usb_autopm_put_interface(dev->intf);
1594 
1595 	return ret;
1596 }
1597 
1598 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1599 				u8 *data)
1600 {
1601 	if (stringset == ETH_SS_STATS)
1602 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1603 }
1604 
1605 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1606 {
1607 	if (sset == ETH_SS_STATS)
1608 		return ARRAY_SIZE(lan78xx_gstrings);
1609 
1610 	return -EOPNOTSUPP;
1611 }
1612 
1613 static void lan78xx_get_stats(struct net_device *netdev,
1614 			      struct ethtool_stats *stats, u64 *data)
1615 {
1616 	struct lan78xx_net *dev = netdev_priv(netdev);
1617 
1618 	lan78xx_update_stats(dev);
1619 
1620 	mutex_lock(&dev->stats.access_lock);
1621 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1622 	mutex_unlock(&dev->stats.access_lock);
1623 }
1624 
1625 static void lan78xx_get_wol(struct net_device *netdev,
1626 			    struct ethtool_wolinfo *wol)
1627 {
1628 	struct lan78xx_net *dev = netdev_priv(netdev);
1629 	int ret;
1630 	u32 buf;
1631 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1632 
1633 	if (usb_autopm_get_interface(dev->intf) < 0)
1634 		return;
1635 
1636 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1637 	if (unlikely(ret < 0)) {
1638 		wol->supported = 0;
1639 		wol->wolopts = 0;
1640 	} else {
1641 		if (buf & USB_CFG_RMT_WKP_) {
1642 			wol->supported = WAKE_ALL;
1643 			wol->wolopts = pdata->wol;
1644 		} else {
1645 			wol->supported = 0;
1646 			wol->wolopts = 0;
1647 		}
1648 	}
1649 
1650 	usb_autopm_put_interface(dev->intf);
1651 }
1652 
1653 static int lan78xx_set_wol(struct net_device *netdev,
1654 			   struct ethtool_wolinfo *wol)
1655 {
1656 	struct lan78xx_net *dev = netdev_priv(netdev);
1657 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1658 	int ret;
1659 
1660 	if (wol->wolopts & ~WAKE_ALL)
1661 		return -EINVAL;
1662 
1663 	ret = usb_autopm_get_interface(dev->intf);
1664 	if (ret < 0)
1665 		return ret;
1666 
1667 	pdata->wol = wol->wolopts;
1668 
1669 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1670 
1671 	phy_ethtool_set_wol(netdev->phydev, wol);
1672 
1673 	usb_autopm_put_interface(dev->intf);
1674 
1675 	return ret;
1676 }
1677 
1678 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1679 {
1680 	struct lan78xx_net *dev = netdev_priv(net);
1681 	struct phy_device *phydev = net->phydev;
1682 	int ret;
1683 	u32 buf;
1684 
1685 	ret = usb_autopm_get_interface(dev->intf);
1686 	if (ret < 0)
1687 		return ret;
1688 
1689 	ret = phy_ethtool_get_eee(phydev, edata);
1690 	if (ret < 0)
1691 		goto exit;
1692 
1693 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1694 	if (buf & MAC_CR_EEE_EN_) {
1695 		edata->eee_enabled = true;
1696 		edata->eee_active = !!(edata->advertised &
1697 				       edata->lp_advertised);
1698 		edata->tx_lpi_enabled = true;
1699 		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
1700 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1701 		edata->tx_lpi_timer = buf;
1702 	} else {
1703 		edata->eee_enabled = false;
1704 		edata->eee_active = false;
1705 		edata->tx_lpi_enabled = false;
1706 		edata->tx_lpi_timer = 0;
1707 	}
1708 
1709 	ret = 0;
1710 exit:
1711 	usb_autopm_put_interface(dev->intf);
1712 
1713 	return ret;
1714 }
1715 
1716 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1717 {
1718 	struct lan78xx_net *dev = netdev_priv(net);
1719 	int ret;
1720 	u32 buf;
1721 
1722 	ret = usb_autopm_get_interface(dev->intf);
1723 	if (ret < 0)
1724 		return ret;
1725 
1726 	if (edata->eee_enabled) {
1727 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1728 		buf |= MAC_CR_EEE_EN_;
1729 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1730 
1731 		phy_ethtool_set_eee(net->phydev, edata);
1732 
1733 		buf = (u32)edata->tx_lpi_timer;
1734 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1735 	} else {
1736 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1737 		buf &= ~MAC_CR_EEE_EN_;
1738 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1739 	}
1740 
1741 	usb_autopm_put_interface(dev->intf);
1742 
1743 	return 0;
1744 }
1745 
1746 static u32 lan78xx_get_link(struct net_device *net)
1747 {
1748 	u32 link;
1749 
1750 	mutex_lock(&net->phydev->lock);
1751 	phy_read_status(net->phydev);
1752 	link = net->phydev->link;
1753 	mutex_unlock(&net->phydev->lock);
1754 
1755 	return link;
1756 }
1757 
1758 static void lan78xx_get_drvinfo(struct net_device *net,
1759 				struct ethtool_drvinfo *info)
1760 {
1761 	struct lan78xx_net *dev = netdev_priv(net);
1762 
1763 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1764 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1765 }
1766 
1767 static u32 lan78xx_get_msglevel(struct net_device *net)
1768 {
1769 	struct lan78xx_net *dev = netdev_priv(net);
1770 
1771 	return dev->msg_enable;
1772 }
1773 
1774 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1775 {
1776 	struct lan78xx_net *dev = netdev_priv(net);
1777 
1778 	dev->msg_enable = level;
1779 }
1780 
1781 static int lan78xx_get_link_ksettings(struct net_device *net,
1782 				      struct ethtool_link_ksettings *cmd)
1783 {
1784 	struct lan78xx_net *dev = netdev_priv(net);
1785 	struct phy_device *phydev = net->phydev;
1786 	int ret;
1787 
1788 	ret = usb_autopm_get_interface(dev->intf);
1789 	if (ret < 0)
1790 		return ret;
1791 
1792 	phy_ethtool_ksettings_get(phydev, cmd);
1793 
1794 	usb_autopm_put_interface(dev->intf);
1795 
1796 	return ret;
1797 }
1798 
1799 static int lan78xx_set_link_ksettings(struct net_device *net,
1800 				      const struct ethtool_link_ksettings *cmd)
1801 {
1802 	struct lan78xx_net *dev = netdev_priv(net);
1803 	struct phy_device *phydev = net->phydev;
1804 	int ret = 0;
1805 	int temp;
1806 
1807 	ret = usb_autopm_get_interface(dev->intf);
1808 	if (ret < 0)
1809 		return ret;
1810 
1811 	/* change speed & duplex */
1812 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1813 
1814 	if (!cmd->base.autoneg) {
1815 		/* force link down */
1816 		temp = phy_read(phydev, MII_BMCR);
1817 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1818 		mdelay(1);
1819 		phy_write(phydev, MII_BMCR, temp);
1820 	}
1821 
1822 	usb_autopm_put_interface(dev->intf);
1823 
1824 	return ret;
1825 }
1826 
1827 static void lan78xx_get_pause(struct net_device *net,
1828 			      struct ethtool_pauseparam *pause)
1829 {
1830 	struct lan78xx_net *dev = netdev_priv(net);
1831 	struct phy_device *phydev = net->phydev;
1832 	struct ethtool_link_ksettings ecmd;
1833 
1834 	phy_ethtool_ksettings_get(phydev, &ecmd);
1835 
1836 	pause->autoneg = dev->fc_autoneg;
1837 
1838 	if (dev->fc_request_control & FLOW_CTRL_TX)
1839 		pause->tx_pause = 1;
1840 
1841 	if (dev->fc_request_control & FLOW_CTRL_RX)
1842 		pause->rx_pause = 1;
1843 }
1844 
1845 static int lan78xx_set_pause(struct net_device *net,
1846 			     struct ethtool_pauseparam *pause)
1847 {
1848 	struct lan78xx_net *dev = netdev_priv(net);
1849 	struct phy_device *phydev = net->phydev;
1850 	struct ethtool_link_ksettings ecmd;
1851 	int ret;
1852 
1853 	phy_ethtool_ksettings_get(phydev, &ecmd);
1854 
1855 	if (pause->autoneg && !ecmd.base.autoneg) {
1856 		ret = -EINVAL;
1857 		goto exit;
1858 	}
1859 
1860 	dev->fc_request_control = 0;
1861 	if (pause->rx_pause)
1862 		dev->fc_request_control |= FLOW_CTRL_RX;
1863 
1864 	if (pause->tx_pause)
1865 		dev->fc_request_control |= FLOW_CTRL_TX;
1866 
1867 	if (ecmd.base.autoneg) {
1868 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1869 		u32 mii_adv;
1870 
1871 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1872 				   ecmd.link_modes.advertising);
1873 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1874 				   ecmd.link_modes.advertising);
1875 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1876 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1877 		linkmode_or(ecmd.link_modes.advertising, fc,
1878 			    ecmd.link_modes.advertising);
1879 
1880 		phy_ethtool_ksettings_set(phydev, &ecmd);
1881 	}
1882 
1883 	dev->fc_autoneg = pause->autoneg;
1884 
1885 	ret = 0;
1886 exit:
1887 	return ret;
1888 }
1889 
1890 static int lan78xx_get_regs_len(struct net_device *netdev)
1891 {
1892 	if (!netdev->phydev)
1893 		return sizeof(lan78xx_regs);
1894 
1895 	return sizeof(lan78xx_regs) + PHY_REG_SIZE;
1896 }
1897 
1898 static void
1899 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1900 		 void *buf)
1901 {
1902 	u32 *data = buf;
1903 	int i, j;
1904 	struct lan78xx_net *dev = netdev_priv(netdev);
1905 
1906 	/* Read Device/MAC registers */
1907 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1908 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1909 
1910 	if (!netdev->phydev)
1911 		return;
1912 
1913 	/* Read PHY registers */
1914 	for (j = 0; j < 32; i++, j++)
1915 		data[i] = phy_read(netdev->phydev, j);
1916 }
1917 
1918 static const struct ethtool_ops lan78xx_ethtool_ops = {
1919 	.get_link	= lan78xx_get_link,
1920 	.nway_reset	= phy_ethtool_nway_reset,
1921 	.get_drvinfo	= lan78xx_get_drvinfo,
1922 	.get_msglevel	= lan78xx_get_msglevel,
1923 	.set_msglevel	= lan78xx_set_msglevel,
1924 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1925 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1926 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1927 	.get_ethtool_stats = lan78xx_get_stats,
1928 	.get_sset_count = lan78xx_get_sset_count,
1929 	.get_strings	= lan78xx_get_strings,
1930 	.get_wol	= lan78xx_get_wol,
1931 	.set_wol	= lan78xx_set_wol,
1932 	.get_ts_info	= ethtool_op_get_ts_info,
1933 	.get_eee	= lan78xx_get_eee,
1934 	.set_eee	= lan78xx_set_eee,
1935 	.get_pauseparam	= lan78xx_get_pause,
1936 	.set_pauseparam	= lan78xx_set_pause,
1937 	.get_link_ksettings = lan78xx_get_link_ksettings,
1938 	.set_link_ksettings = lan78xx_set_link_ksettings,
1939 	.get_regs_len	= lan78xx_get_regs_len,
1940 	.get_regs	= lan78xx_get_regs,
1941 };
1942 
1943 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1944 {
1945 	u32 addr_lo, addr_hi;
1946 	u8 addr[ETH_ALEN];
1947 
1948 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1949 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1950 
1951 	addr[0] = addr_lo & 0xFF;
1952 	addr[1] = (addr_lo >> 8) & 0xFF;
1953 	addr[2] = (addr_lo >> 16) & 0xFF;
1954 	addr[3] = (addr_lo >> 24) & 0xFF;
1955 	addr[4] = addr_hi & 0xFF;
1956 	addr[5] = (addr_hi >> 8) & 0xFF;
1957 
1958 	if (!is_valid_ether_addr(addr)) {
1959 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1960 			/* valid address present in Device Tree */
1961 			netif_dbg(dev, ifup, dev->net,
1962 				  "MAC address read from Device Tree");
1963 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1964 						 ETH_ALEN, addr) == 0) ||
1965 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1966 					      ETH_ALEN, addr) == 0)) &&
1967 			   is_valid_ether_addr(addr)) {
1968 			/* eeprom values are valid so use them */
1969 			netif_dbg(dev, ifup, dev->net,
1970 				  "MAC address read from EEPROM");
1971 		} else {
1972 			/* generate random MAC */
1973 			eth_random_addr(addr);
1974 			netif_dbg(dev, ifup, dev->net,
1975 				  "MAC address set to random addr");
1976 		}
1977 
1978 		addr_lo = addr[0] | (addr[1] << 8) |
1979 			  (addr[2] << 16) | (addr[3] << 24);
1980 		addr_hi = addr[4] | (addr[5] << 8);
1981 
1982 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1983 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1984 	}
1985 
1986 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1987 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1988 
1989 	eth_hw_addr_set(dev->net, addr);
1990 }
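
/* Worked example (illustrative): for the MAC address 00:11:22:33:44:55,
 * the packing above yields addr_lo = 0x33221100 and
 * addr_hi = 0x00005544, i.e. the bytes are stored little-endian across
 * the RX_ADDRL/RX_ADDRH register pair.
 */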
1991 
1992 /* MDIO read and write wrappers for phylib */
1993 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1994 {
1995 	struct lan78xx_net *dev = bus->priv;
1996 	u32 val, addr;
1997 	int ret;
1998 
1999 	ret = usb_autopm_get_interface(dev->intf);
2000 	if (ret < 0)
2001 		return ret;
2002 
2003 	mutex_lock(&dev->phy_mutex);
2004 
2005 	/* confirm MII not busy */
2006 	ret = lan78xx_phy_wait_not_busy(dev);
2007 	if (ret < 0)
2008 		goto done;
2009 
2010 	/* set the address, index & direction (read from PHY) */
2011 	addr = mii_access(phy_id, idx, MII_READ);
2012 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2013 
2014 	ret = lan78xx_phy_wait_not_busy(dev);
2015 	if (ret < 0)
2016 		goto done;
2017 
2018 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;
2019 
2020 	ret = (int)(val & 0xFFFF);
2021 
2022 done:
2023 	mutex_unlock(&dev->phy_mutex);
2024 	usb_autopm_put_interface(dev->intf);
2025 
2026 	return ret;
2027 }
2028 
2029 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2030 				 u16 regval)
2031 {
2032 	struct lan78xx_net *dev = bus->priv;
2033 	u32 val, addr;
2034 	int ret;
2035 
2036 	ret = usb_autopm_get_interface(dev->intf);
2037 	if (ret < 0)
2038 		return ret;
2039 
2040 	mutex_lock(&dev->phy_mutex);
2041 
2042 	/* confirm MII not busy */
2043 	ret = lan78xx_phy_wait_not_busy(dev);
2044 	if (ret < 0)
2045 		goto done;
2046 
2047 	val = (u32)regval;
2048 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2049 
2050 	/* set the address, index & direction (write to PHY) */
2051 	addr = mii_access(phy_id, idx, MII_WRITE);
2052 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2053 
2054 	ret = lan78xx_phy_wait_not_busy(dev);
2055 	if (ret < 0)
2056 		goto done;
2057 
2058 done:
2059 	mutex_unlock(&dev->phy_mutex);
2060 	usb_autopm_put_interface(dev->intf);
2061 	return ret;
2062 }
2063 
2064 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2065 {
2066 	struct device_node *node;
2067 	int ret;
2068 
2069 	dev->mdiobus = mdiobus_alloc();
2070 	if (!dev->mdiobus) {
2071 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2072 		return -ENOMEM;
2073 	}
2074 
2075 	dev->mdiobus->priv = (void *)dev;
2076 	dev->mdiobus->read = lan78xx_mdiobus_read;
2077 	dev->mdiobus->write = lan78xx_mdiobus_write;
2078 	dev->mdiobus->name = "lan78xx-mdiobus";
2079 	dev->mdiobus->parent = &dev->udev->dev;
2080 
2081 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2082 		 dev->udev->bus->busnum, dev->udev->devnum);
2083 
2084 	switch (dev->chipid) {
2085 	case ID_REV_CHIP_ID_7800_:
2086 	case ID_REV_CHIP_ID_7850_:
2087 		/* set to internal PHY id */
2088 		dev->mdiobus->phy_mask = ~(1 << 1);
2089 		break;
2090 	case ID_REV_CHIP_ID_7801_:
2091 		/* scan through PHYAD[2..0] */
2092 		dev->mdiobus->phy_mask = ~(0xFF);
2093 		break;
2094 	}
2095 
2096 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2097 	ret = of_mdiobus_register(dev->mdiobus, node);
2098 	of_node_put(node);
2099 	if (ret) {
2100 		netdev_err(dev->net, "can't register MDIO bus\n");
2101 		goto exit1;
2102 	}
2103 
2104 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2105 	return 0;
2106 exit1:
2107 	mdiobus_free(dev->mdiobus);
2108 	return ret;
2109 }
2110 
2111 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2112 {
2113 	mdiobus_unregister(dev->mdiobus);
2114 	mdiobus_free(dev->mdiobus);
2115 }
2116 
2117 static void lan78xx_link_status_change(struct net_device *net)
2118 {
2119 	struct phy_device *phydev = net->phydev;
2120 	int temp;
2121 
2122 	/* In forced 100 F/H mode, the chip may fail to set the mode
2123 	 * correctly when the cable is switched between a long (~50 m
2124 	 * or more) and a short one. As a workaround, set the speed
2125 	 * to 10 first, then back to 100, in forced 100 F/H mode.
2126 	 */
2127 	if (!phydev->autoneg && (phydev->speed == 100)) {
2128 		/* disable phy interrupt */
2129 		temp = phy_read(phydev, LAN88XX_INT_MASK);
2130 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
2131 		phy_write(phydev, LAN88XX_INT_MASK, temp);
2132 
2133 		temp = phy_read(phydev, MII_BMCR);
2134 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
2135 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
2136 		temp |= BMCR_SPEED100;
2137 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
2138 
2139 		/* clear any interrupt left pending by the workaround */
2140 		temp = phy_read(phydev, LAN88XX_INT_STS);
2141 
2142 		/* enable phy interrupt back */
2143 		temp = phy_read(phydev, LAN88XX_INT_MASK);
2144 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
2145 		phy_write(phydev, LAN88XX_INT_MASK, temp);
2146 	}
2147 }
2148 
2149 static int irq_map(struct irq_domain *d, unsigned int irq,
2150 		   irq_hw_number_t hwirq)
2151 {
2152 	struct irq_domain_data *data = d->host_data;
2153 
2154 	irq_set_chip_data(irq, data);
2155 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2156 	irq_set_noprobe(irq);
2157 
2158 	return 0;
2159 }
2160 
2161 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2162 {
2163 	irq_set_chip_and_handler(irq, NULL, NULL);
2164 	irq_set_chip_data(irq, NULL);
2165 }
2166 
2167 static const struct irq_domain_ops chip_domain_ops = {
2168 	.map	= irq_map,
2169 	.unmap	= irq_unmap,
2170 };
2171 
2172 static void lan78xx_irq_mask(struct irq_data *irqd)
2173 {
2174 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2175 
2176 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2177 }
2178 
2179 static void lan78xx_irq_unmask(struct irq_data *irqd)
2180 {
2181 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2182 
2183 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2184 }
2185 
2186 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2187 {
2188 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2189 
2190 	mutex_lock(&data->irq_lock);
2191 }
2192 
2193 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2194 {
2195 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2196 	struct lan78xx_net *dev =
2197 			container_of(data, struct lan78xx_net, domain_data);
2198 	u32 buf;
2199 
2200 	/* Do the register access here: irq_bus_lock & irq_bus_sync_unlock
2201 	 * are the only two callbacks executed in a non-atomic context.
2202 	 */
2203 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2204 	if (buf != data->irqenable)
2205 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2206 
2207 	mutex_unlock(&data->irq_lock);
2208 }
2209 
2210 static struct irq_chip lan78xx_irqchip = {
2211 	.name			= "lan78xx-irqs",
2212 	.irq_mask		= lan78xx_irq_mask,
2213 	.irq_unmask		= lan78xx_irq_unmask,
2214 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2215 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2216 };
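
/* Usage sketch (illustrative, assumes the standard genirq helpers): the
 * INT_EP_CTL update is deferred to irq_bus_sync_unlock(), so a consumer
 * such as phylib toggles the PHY interrupt with the sleeping helpers:
 *
 *	disable_irq(dev->domain_data.phyirq);
 *	enable_irq(dev->domain_data.phyirq);
 *
 * Both end up in lan78xx_irq_mask()/lan78xx_irq_unmask() under the bus
 * lock; the actual USB write to INT_EP_CTL is only issued from
 * lan78xx_irq_bus_sync_unlock(), where sleeping is allowed.
 */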
2217 
2218 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2219 {
2220 	struct device_node *of_node;
2221 	struct irq_domain *irqdomain;
2222 	unsigned int irqmap = 0;
2223 	u32 buf;
2224 	int ret = 0;
2225 
2226 	of_node = dev->udev->dev.parent->of_node;
2227 
2228 	mutex_init(&dev->domain_data.irq_lock);
2229 
2230 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2231 	dev->domain_data.irqenable = buf;
2232 
2233 	dev->domain_data.irqchip = &lan78xx_irqchip;
2234 	dev->domain_data.irq_handler = handle_simple_irq;
2235 
2236 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2237 					  &chip_domain_ops, &dev->domain_data);
2238 	if (irqdomain) {
2239 		/* create mapping for PHY interrupt */
2240 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2241 		if (!irqmap) {
2242 			irq_domain_remove(irqdomain);
2243 
2244 			irqdomain = NULL;
2245 			ret = -EINVAL;
2246 		}
2247 	} else {
2248 		ret = -EINVAL;
2249 	}
2250 
2251 	dev->domain_data.irqdomain = irqdomain;
2252 	dev->domain_data.phyirq = irqmap;
2253 
2254 	return ret;
2255 }
2256 
2257 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2258 {
2259 	if (dev->domain_data.phyirq > 0) {
2260 		irq_dispose_mapping(dev->domain_data.phyirq);
2261 
2262 		if (dev->domain_data.irqdomain)
2263 			irq_domain_remove(dev->domain_data.irqdomain);
2264 	}
2265 	dev->domain_data.phyirq = 0;
2266 	dev->domain_data.irqdomain = NULL;
2267 }
2268 
2269 static int lan8835_fixup(struct phy_device *phydev)
2270 {
2271 	int buf;
2272 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2273 
2274 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2275 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2276 	buf &= ~0x1800;
2277 	buf |= 0x0800;
2278 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2279 
2280 	/* RGMII MAC TXC Delay Enable */
2281 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2282 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2283 
2284 	/* RGMII TX DLL Tune Adjust */
2285 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2286 
2287 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2288 
2289 	return 1;
2290 }
2291 
2292 static int ksz9031rnx_fixup(struct phy_device *phydev)
2293 {
2294 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2295 
2296 	/* Micrel KSZ9031RNX PHY configuration */
2297 	/* RGMII Control Signal Pad Skew */
2298 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2299 	/* RGMII RX Data Pad Skew */
2300 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2301 	/* RGMII RX Clock Pad Skew */
2302 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2303 
2304 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2305 
2306 	return 1;
2307 }
2308 
2309 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2310 {
2311 	u32 buf;
2312 	int ret;
2313 	struct fixed_phy_status fphy_status = {
2314 		.link = 1,
2315 		.speed = SPEED_1000,
2316 		.duplex = DUPLEX_FULL,
2317 	};
2318 	struct phy_device *phydev;
2319 
2320 	phydev = phy_find_first(dev->mdiobus);
2321 	if (!phydev) {
2322 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2323 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2324 		if (IS_ERR(phydev)) {
2325 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2326 			return NULL;
2327 		}
2328 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2329 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2330 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2331 					MAC_RGMII_ID_TXC_DELAY_EN_);
2332 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2333 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2334 		buf |= HW_CFG_CLK125_EN_;
2335 		buf |= HW_CFG_REFCLK25_EN_;
2336 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2337 	} else {
2338 		if (!phydev->drv) {
2339 			netdev_err(dev->net, "no PHY driver found\n");
2340 			return NULL;
2341 		}
2342 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2343 		/* external PHY fixup for KSZ9031RNX */
2344 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2345 						 ksz9031rnx_fixup);
2346 		if (ret < 0) {
2347 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2348 			return NULL;
2349 		}
2350 		/* external PHY fixup for LAN8835 */
2351 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2352 						 lan8835_fixup);
2353 		if (ret < 0) {
2354 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2355 			return NULL;
2356 		}
2357 		/* add more external PHY fixup here if needed */
2358 
2359 		phydev->is_internal = false;
2360 	}
2361 	return phydev;
2362 }
2363 
2364 static int lan78xx_phy_init(struct lan78xx_net *dev)
2365 {
2366 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2367 	int ret;
2368 	u32 mii_adv;
2369 	struct phy_device *phydev;
2370 
2371 	switch (dev->chipid) {
2372 	case ID_REV_CHIP_ID_7801_:
2373 		phydev = lan7801_phy_init(dev);
2374 		if (!phydev) {
2375 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2376 			return -EIO;
2377 		}
2378 		break;
2379 
2380 	case ID_REV_CHIP_ID_7800_:
2381 	case ID_REV_CHIP_ID_7850_:
2382 		phydev = phy_find_first(dev->mdiobus);
2383 		if (!phydev) {
2384 			netdev_err(dev->net, "no PHY found\n");
2385 			return -EIO;
2386 		}
2387 		phydev->is_internal = true;
2388 		dev->interface = PHY_INTERFACE_MODE_GMII;
2389 		break;
2390 
2391 	default:
2392 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2393 		return -EIO;
2394 	}
2395 
2396 	/* if phyirq is not set, use polling mode in phylib */
2397 	if (dev->domain_data.phyirq > 0)
2398 		phydev->irq = dev->domain_data.phyirq;
2399 	else
2400 		phydev->irq = PHY_POLL;
2401 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2402 
2403 	/* set to AUTOMDIX */
2404 	phydev->mdix = ETH_TP_MDI_AUTO;
2405 
2406 	ret = phy_connect_direct(dev->net, phydev,
2407 				 lan78xx_link_status_change,
2408 				 dev->interface);
2409 	if (ret) {
2410 		netdev_err(dev->net, "can't attach PHY to %s\n",
2411 			   dev->mdiobus->id);
2412 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2413 			if (phy_is_pseudo_fixed_link(phydev)) {
2414 				fixed_phy_unregister(phydev);
2415 			} else {
2416 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2417 							     0xfffffff0);
2418 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2419 							     0xfffffff0);
2420 			}
2421 		}
2422 		return -EIO;
2423 	}
2424 
2425 	/* MAC doesn't support 1000T Half */
2426 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2427 
2428 	/* support both flow controls */
2429 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2430 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2431 			   phydev->advertising);
2432 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2433 			   phydev->advertising);
2434 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2435 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2436 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2437 
2438 	if (phydev->mdio.dev.of_node) {
2439 		u32 reg;
2440 		int len;
2441 
2442 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2443 						      "microchip,led-modes",
2444 						      sizeof(u32));
2445 		if (len >= 0) {
2446 			/* Ensure the appropriate LEDs are enabled */
2447 			lan78xx_read_reg(dev, HW_CFG, &reg);
2448 			reg &= ~(HW_CFG_LED0_EN_ |
2449 				 HW_CFG_LED1_EN_ |
2450 				 HW_CFG_LED2_EN_ |
2451 				 HW_CFG_LED3_EN_);
2452 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2453 				(len > 1) * HW_CFG_LED1_EN_ |
2454 				(len > 2) * HW_CFG_LED2_EN_ |
2455 				(len > 3) * HW_CFG_LED3_EN_;
2456 			lan78xx_write_reg(dev, HW_CFG, reg);
2457 		}
2458 	}
2459 
2460 	genphy_config_aneg(phydev);
2461 
2462 	dev->fc_autoneg = phydev->autoneg;
2463 
2464 	return 0;
2465 }
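
/* Worked example (illustrative): the multiplications in the LED setup
 * above act as conditional masks. If the "microchip,led-modes" DT
 * property has two entries (len == 2), then (len > 0) and (len > 1)
 * evaluate to 1 while (len > 2) and (len > 3) are 0, so HW_CFG is
 * written with LED0_EN and LED1_EN set and LED2_EN/LED3_EN cleared.
 */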
2466 
2467 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2468 {
2469 	u32 buf;
2470 	bool rxenabled;
2471 
2472 	lan78xx_read_reg(dev, MAC_RX, &buf);
2473 
2474 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2475 
2476 	if (rxenabled) {
2477 		buf &= ~MAC_RX_RXEN_;
2478 		lan78xx_write_reg(dev, MAC_RX, buf);
2479 	}
2480 
2481 	/* add 4 to size for FCS */
2482 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2483 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2484 
2485 	lan78xx_write_reg(dev, MAC_RX, buf);
2486 
2487 	if (rxenabled) {
2488 		buf |= MAC_RX_RXEN_;
2489 		lan78xx_write_reg(dev, MAC_RX, buf);
2490 	}
2491 
2492 	return 0;
2493 }
2494 
2495 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2496 {
2497 	struct sk_buff *skb;
2498 	unsigned long flags;
2499 	int count = 0;
2500 
2501 	spin_lock_irqsave(&q->lock, flags);
2502 	while (!skb_queue_empty(q)) {
2503 		struct skb_data	*entry;
2504 		struct urb *urb;
2505 		int ret;
2506 
2507 		skb_queue_walk(q, skb) {
2508 			entry = (struct skb_data *)skb->cb;
2509 			if (entry->state != unlink_start)
2510 				goto found;
2511 		}
2512 		break;
2513 found:
2514 		entry->state = unlink_start;
2515 		urb = entry->urb;
2516 
2517 		/* Take a reference on the URB so it cannot be freed
2518 		 * while usb_unlink_urb() runs; otherwise a use-after-free
2519 		 * could be triggered inside usb_unlink_urb(), since it
2520 		 * always races with the .complete handler (including
2521 		 * defer_bh).
2522 		 */
2523 		usb_get_urb(urb);
2524 		spin_unlock_irqrestore(&q->lock, flags);
2525 		/* during some PM-driven resume scenarios,
2526 		 * these (async) unlinks complete immediately
2527 		 */
2528 		ret = usb_unlink_urb(urb);
2529 		if (ret != -EINPROGRESS && ret != 0)
2530 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2531 		else
2532 			count++;
2533 		usb_put_urb(urb);
2534 		spin_lock_irqsave(&q->lock, flags);
2535 	}
2536 	spin_unlock_irqrestore(&q->lock, flags);
2537 	return count;
2538 }
2539 
2540 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2541 {
2542 	struct lan78xx_net *dev = netdev_priv(netdev);
2543 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2544 	int ret;
2545 
2546 	/* no second zero-length packet read wanted after mtu-sized packets */
2547 	if ((max_frame_len % dev->maxpacket) == 0)
2548 		return -EDOM;
2549 
2550 	ret = usb_autopm_get_interface(dev->intf);
2551 	if (ret < 0)
2552 		return ret;
2553 
2554 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2555 	if (!ret)
2556 		netdev->mtu = new_mtu;
2557 
2558 	usb_autopm_put_interface(dev->intf);
2559 
2560 	return ret;
2561 }
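
/* Worked example (illustrative figures): with a high-speed bulk-in
 * maxpacket of 512 bytes, a max_frame_len of 1536 would end exactly on
 * a USB packet boundary and require a zero-length packet to terminate
 * each transfer; lan78xx_change_mtu() rejects such MTUs with -EDOM.
 */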
2562 
2563 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2564 {
2565 	struct lan78xx_net *dev = netdev_priv(netdev);
2566 	struct sockaddr *addr = p;
2567 	u32 addr_lo, addr_hi;
2568 
2569 	if (netif_running(netdev))
2570 		return -EBUSY;
2571 
2572 	if (!is_valid_ether_addr(addr->sa_data))
2573 		return -EADDRNOTAVAIL;
2574 
2575 	eth_hw_addr_set(netdev, addr->sa_data);
2576 
2577 	addr_lo = netdev->dev_addr[0] |
2578 		  netdev->dev_addr[1] << 8 |
2579 		  netdev->dev_addr[2] << 16 |
2580 		  netdev->dev_addr[3] << 24;
2581 	addr_hi = netdev->dev_addr[4] |
2582 		  netdev->dev_addr[5] << 8;
2583 
2584 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2585 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2586 
2587 	/* Added to support MAC address changes */
2588 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2589 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2590 
2591 	return 0;
2592 }
2593 
2594 /* Enable or disable Rx checksum offload engine */
2595 static int lan78xx_set_features(struct net_device *netdev,
2596 				netdev_features_t features)
2597 {
2598 	struct lan78xx_net *dev = netdev_priv(netdev);
2599 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2600 	unsigned long flags;
2601 
2602 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2603 
2604 	if (features & NETIF_F_RXCSUM) {
2605 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2606 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2607 	} else {
2608 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2609 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2610 	}
2611 
2612 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2613 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2614 	else
2615 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2616 
2617 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2618 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2619 	else
2620 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2621 
2622 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2623 
2624 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2625 
2626 	return 0;
2627 }
2628 
2629 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2630 {
2631 	struct lan78xx_priv *pdata =
2632 			container_of(param, struct lan78xx_priv, set_vlan);
2633 	struct lan78xx_net *dev = pdata->dev;
2634 
2635 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2636 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2637 }
2638 
2639 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2640 				   __be16 proto, u16 vid)
2641 {
2642 	struct lan78xx_net *dev = netdev_priv(netdev);
2643 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2644 	u16 vid_bit_index;
2645 	u16 vid_dword_index;
2646 
2647 	vid_dword_index = (vid >> 5) & 0x7F;
2648 	vid_bit_index = vid & 0x1F;
2649 
2650 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2651 
2652 	/* defer register writes to a sleepable context */
2653 	schedule_work(&pdata->set_vlan);
2654 
2655 	return 0;
2656 }
2657 
2658 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2659 				    __be16 proto, u16 vid)
2660 {
2661 	struct lan78xx_net *dev = netdev_priv(netdev);
2662 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2663 	u16 vid_bit_index;
2664 	u16 vid_dword_index;
2665 
2666 	vid_dword_index = (vid >> 5) & 0x7F;
2667 	vid_bit_index = vid & 0x1F;
2668 
2669 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2670 
2671 	/* defer register writes to a sleepable context */
2672 	schedule_work(&pdata->set_vlan);
2673 
2674 	return 0;
2675 }
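
/* Worked example (illustrative): for VID 100, vid_dword_index is
 * (100 >> 5) & 0x7F = 3 and vid_bit_index is 100 & 0x1F = 4, so the
 * add/kill handlers above toggle bit 4 of vlan_table[3]; the deferred
 * work item then writes the whole table (DP_SEL_VHF_VLAN_LEN words) to
 * the VLAN filter dataport.
 */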
2676 
2677 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2678 {
2679 	int ret;
2680 	u32 buf;
2681 	u32 regs[6] = { 0 };
2682 
2683 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2684 	if (buf & USB_CFG1_LTM_ENABLE_) {
2685 		u8 temp[2];
2686 		/* Get values from EEPROM first */
2687 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2688 			if (temp[0] == 24) {
2689 				ret = lan78xx_read_raw_eeprom(dev,
2690 							      temp[1] * 2,
2691 							      24,
2692 							      (u8 *)regs);
2693 				if (ret < 0)
2694 					return;
2695 			}
2696 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2697 			if (temp[0] == 24) {
2698 				ret = lan78xx_read_raw_otp(dev,
2699 							   temp[1] * 2,
2700 							   24,
2701 							   (u8 *)regs);
2702 				if (ret < 0)
2703 					return;
2704 			}
2705 		}
2706 	}
2707 
2708 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2709 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2710 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2711 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2712 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2713 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2714 }
2715 
2716 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2717 {
2718 	int result = 0;
2719 
2720 	switch (dev->udev->speed) {
2721 	case USB_SPEED_SUPER:
2722 		dev->rx_urb_size = RX_SS_URB_SIZE;
2723 		dev->tx_urb_size = TX_SS_URB_SIZE;
2724 		dev->n_rx_urbs = RX_SS_URB_NUM;
2725 		dev->n_tx_urbs = TX_SS_URB_NUM;
2726 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2727 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2728 		break;
2729 	case USB_SPEED_HIGH:
2730 		dev->rx_urb_size = RX_HS_URB_SIZE;
2731 		dev->tx_urb_size = TX_HS_URB_SIZE;
2732 		dev->n_rx_urbs = RX_HS_URB_NUM;
2733 		dev->n_tx_urbs = TX_HS_URB_NUM;
2734 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2735 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2736 		break;
2737 	case USB_SPEED_FULL:
2738 		dev->rx_urb_size = RX_FS_URB_SIZE;
2739 		dev->tx_urb_size = TX_FS_URB_SIZE;
2740 		dev->n_rx_urbs = RX_FS_URB_NUM;
2741 		dev->n_tx_urbs = TX_FS_URB_NUM;
2742 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2743 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2744 		break;
2745 	default:
2746 		netdev_warn(dev->net, "USB bus speed not supported\n");
2747 		result = -EIO;
2748 		break;
2749 	}
2750 
2751 	return result;
2752 }
2753 
2754 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2755 {
2756 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2757 }
2758 
2759 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2760 			   u32 hw_disabled)
2761 {
2762 	unsigned long timeout;
2763 	bool stopped = true;
2764 	int ret;
2765 	u32 buf;
2766 
2767 	/* Stop the h/w block (if not already stopped) */
2768 
2769 	ret = lan78xx_read_reg(dev, reg, &buf);
2770 	if (ret < 0)
2771 		return ret;
2772 
2773 	if (buf & hw_enabled) {
2774 		buf &= ~hw_enabled;
2775 
2776 		ret = lan78xx_write_reg(dev, reg, buf);
2777 		if (ret < 0)
2778 			return ret;
2779 
2780 		stopped = false;
2781 		timeout = jiffies + HW_DISABLE_TIMEOUT;
2782 		do  {
2783 			ret = lan78xx_read_reg(dev, reg, &buf);
2784 			if (ret < 0)
2785 				return ret;
2786 
2787 			if (buf & hw_disabled)
2788 				stopped = true;
2789 			else
2790 				msleep(HW_DISABLE_DELAY_MS);
2791 		} while (!stopped && !time_after(jiffies, timeout));
2792 	}
2793 
2794 	ret = stopped ? 0 : -ETIME;
2795 
2796 	return ret;
2797 }
2798 
2799 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2800 {
2801 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2802 }
2803 
2804 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2805 {
2806 	int ret;
2807 
2808 	netif_dbg(dev, drv, dev->net, "start tx path");
2809 
2810 	/* Start the MAC transmitter */
2811 
2812 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2813 	if (ret < 0)
2814 		return ret;
2815 
2816 	/* Start the Tx FIFO */
2817 
2818 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2819 	if (ret < 0)
2820 		return ret;
2821 
2822 	return 0;
2823 }
2824 
2825 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2826 {
2827 	int ret;
2828 
2829 	netif_dbg(dev, drv, dev->net, "stop tx path");
2830 
2831 	/* Stop the Tx FIFO */
2832 
2833 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2834 	if (ret < 0)
2835 		return ret;
2836 
2837 	/* Stop the MAC transmitter */
2838 
2839 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2840 	if (ret < 0)
2841 		return ret;
2842 
2843 	return 0;
2844 }
2845 
2846 /* The caller must ensure the Tx path is stopped before calling
2847  * lan78xx_flush_tx_fifo().
2848  */
2849 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2850 {
2851 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2852 }
2853 
2854 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2855 {
2856 	int ret;
2857 
2858 	netif_dbg(dev, drv, dev->net, "start rx path");
2859 
2860 	/* Start the Rx FIFO */
2861 
2862 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2863 	if (ret < 0)
2864 		return ret;
2865 
2866 	/* Start the MAC receiver */
2867 
2868 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2869 	if (ret < 0)
2870 		return ret;
2871 
2872 	return 0;
2873 }
2874 
2875 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2876 {
2877 	int ret;
2878 
2879 	netif_dbg(dev, drv, dev->net, "stop rx path");
2880 
2881 	/* Stop the MAC receiver */
2882 
2883 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2884 	if (ret < 0)
2885 		return ret;
2886 
2887 	/* Stop the Rx FIFO */
2888 
2889 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2890 	if (ret < 0)
2891 		return ret;
2892 
2893 	return 0;
2894 }
2895 
2896 /* The caller must ensure the Rx path is stopped before calling
2897  * lan78xx_flush_rx_fifo().
2898  */
2899 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2900 {
2901 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2902 }
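
/* Illustrative call sequence: a FIFO may only be flushed while its data
 * path is stopped, e.g. for the Rx side:
 *
 *	lan78xx_stop_rx_path(dev);
 *	lan78xx_flush_rx_fifo(dev);
 *	lan78xx_start_rx_path(dev);
 *
 * lan78xx_open() below relies on the paths being stopped already and
 * simply flushes both FIFOs before starting the Tx and Rx paths.
 */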
2903 
2904 static int lan78xx_reset(struct lan78xx_net *dev)
2905 {
2906 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2907 	unsigned long timeout;
2908 	int ret;
2909 	u32 buf;
2910 	u8 sig;
2911 
2912 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2913 	if (ret < 0)
2914 		return ret;
2915 
2916 	buf |= HW_CFG_LRST_;
2917 
2918 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2919 	if (ret < 0)
2920 		return ret;
2921 
2922 	timeout = jiffies + HZ;
2923 	do {
2924 		mdelay(1);
2925 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2926 		if (ret < 0)
2927 			return ret;
2928 
2929 		if (time_after(jiffies, timeout)) {
2930 			netdev_warn(dev->net,
2931 				    "timeout on completion of LiteReset");
2932 			ret = -ETIMEDOUT;
2933 			return ret;
2934 		}
2935 	} while (buf & HW_CFG_LRST_);
2936 
2937 	lan78xx_init_mac_address(dev);
2938 
2939 	/* save DEVID for later usage */
2940 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2941 	if (ret < 0)
2942 		return ret;
2943 
2944 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2945 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2946 
2947 	/* Respond to the IN token with a NAK */
2948 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2949 	if (ret < 0)
2950 		return ret;
2951 
2952 	buf |= USB_CFG_BIR_;
2953 
2954 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2955 	if (ret < 0)
2956 		return ret;
2957 
2958 	/* Init LTM */
2959 	lan78xx_init_ltm(dev);
2960 
2961 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
2962 	if (ret < 0)
2963 		return ret;
2964 
2965 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2970 	if (ret < 0)
2971 		return ret;
2972 
2973 	buf |= HW_CFG_MEF_;
2974 
2975 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2976 	if (ret < 0)
2977 		return ret;
2978 
2979 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	buf |= USB_CFG_BCE_;
2984 
2985 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2986 	if (ret < 0)
2987 		return ret;
2988 
2989 	/* set FIFO sizes */
2990 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2991 
2992 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2993 	if (ret < 0)
2994 		return ret;
2995 
2996 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2997 
2998 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2999 	if (ret < 0)
3000 		return ret;
3001 
3002 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3003 	if (ret < 0)
3004 		return ret;
3005 
3006 	ret = lan78xx_write_reg(dev, FLOW, 0);
3007 	if (ret < 0)
3008 		return ret;
3009 
3010 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3011 	if (ret < 0)
3012 		return ret;
3013 
3014 	/* Don't need rfe_ctl_lock during initialisation */
3015 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3016 	if (ret < 0)
3017 		return ret;
3018 
3019 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3020 
3021 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3022 	if (ret < 0)
3023 		return ret;
3024 
3025 	/* Enable or disable checksum offload engines */
3026 	ret = lan78xx_set_features(dev->net, dev->net->features);
3027 	if (ret < 0)
3028 		return ret;
3029 
3030 	lan78xx_set_multicast(dev->net);
3031 
3032 	/* reset PHY */
3033 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3034 	if (ret < 0)
3035 		return ret;
3036 
3037 	buf |= PMT_CTL_PHY_RST_;
3038 
3039 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3040 	if (ret < 0)
3041 		return ret;
3042 
3043 	timeout = jiffies + HZ;
3044 	do {
3045 		mdelay(1);
3046 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3047 		if (ret < 0)
3048 			return ret;
3049 
3050 		if (time_after(jiffies, timeout)) {
3051 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3052 			ret = -ETIMEDOUT;
3053 			return ret;
3054 		}
3055 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3056 
3057 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3058 	if (ret < 0)
3059 		return ret;
3060 
3061 	/* LAN7801 only has RGMII mode */
3062 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3063 		buf &= ~MAC_CR_GMII_EN_;
3064 
3065 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
3066 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3067 		if (!ret && sig != EEPROM_INDICATOR) {
3068 			/* Implies there is no external EEPROM; set the MAC speed */
3069 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3070 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3071 		}
3072 	}
3073 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3074 	if (ret < 0)
3075 		return ret;
3076 
3077 	ret = lan78xx_set_rx_max_frame_length(dev,
3078 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3079 
3080 	return ret;
3081 }
3082 
3083 static void lan78xx_init_stats(struct lan78xx_net *dev)
3084 {
3085 	u32 *p;
3086 	int i;
3087 
3088 	/* initialize for stats update
3089 	 * some counters are 20 bits wide and some are 32 bits
3090 	 */
3091 	p = (u32 *)&dev->stats.rollover_max;
3092 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3093 		p[i] = 0xFFFFF;
3094 
3095 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3096 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3097 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3098 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3099 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3100 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3101 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3102 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3103 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3104 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3105 
3106 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3107 }
3108 
3109 static int lan78xx_open(struct net_device *net)
3110 {
3111 	struct lan78xx_net *dev = netdev_priv(net);
3112 	int ret;
3113 
3114 	netif_dbg(dev, ifup, dev->net, "open device");
3115 
3116 	ret = usb_autopm_get_interface(dev->intf);
3117 	if (ret < 0)
3118 		return ret;
3119 
3120 	mutex_lock(&dev->dev_mutex);
3121 
3122 	phy_start(net->phydev);
3123 
3124 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3125 
3126 	/* for Link Check */
3127 	if (dev->urb_intr) {
3128 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3129 		if (ret < 0) {
3130 			netif_err(dev, ifup, dev->net,
3131 				  "intr submit %d\n", ret);
3132 			goto done;
3133 		}
3134 	}
3135 
3136 	ret = lan78xx_flush_rx_fifo(dev);
3137 	if (ret < 0)
3138 		goto done;
3139 	ret = lan78xx_flush_tx_fifo(dev);
3140 	if (ret < 0)
3141 		goto done;
3142 
3143 	ret = lan78xx_start_tx_path(dev);
3144 	if (ret < 0)
3145 		goto done;
3146 	ret = lan78xx_start_rx_path(dev);
3147 	if (ret < 0)
3148 		goto done;
3149 
3150 	lan78xx_init_stats(dev);
3151 
3152 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3153 
3154 	netif_start_queue(net);
3155 
3156 	dev->link_on = false;
3157 
3158 	napi_enable(&dev->napi);
3159 
3160 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3161 done:
3162 	mutex_unlock(&dev->dev_mutex);
3163 
3164 	usb_autopm_put_interface(dev->intf);
3165 
3166 	return ret;
3167 }
3168 
3169 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3170 {
3171 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3172 	DECLARE_WAITQUEUE(wait, current);
3173 	int temp;
3174 
3175 	/* ensure there are no more active urbs */
3176 	add_wait_queue(&unlink_wakeup, &wait);
3177 	set_current_state(TASK_UNINTERRUPTIBLE);
3178 	dev->wait = &unlink_wakeup;
3179 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3180 
3181 	/* maybe wait for deletions to finish. */
3182 	while (!skb_queue_empty(&dev->rxq) ||
3183 	       !skb_queue_empty(&dev->txq)) {
3184 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3185 		set_current_state(TASK_UNINTERRUPTIBLE);
3186 		netif_dbg(dev, ifdown, dev->net,
3187 			  "waited for %d urb completions", temp);
3188 	}
3189 	set_current_state(TASK_RUNNING);
3190 	dev->wait = NULL;
3191 	remove_wait_queue(&unlink_wakeup, &wait);
3192 
3193 	/* empty Rx done, Rx overflow and Tx pend queues
3194 	 */
3195 	while (!skb_queue_empty(&dev->rxq_done)) {
3196 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3197 
3198 		lan78xx_release_rx_buf(dev, skb);
3199 	}
3200 
3201 	skb_queue_purge(&dev->rxq_overflow);
3202 	skb_queue_purge(&dev->txq_pend);
3203 }
3204 
3205 static int lan78xx_stop(struct net_device *net)
3206 {
3207 	struct lan78xx_net *dev = netdev_priv(net);
3208 
3209 	netif_dbg(dev, ifdown, dev->net, "stop device");
3210 
3211 	mutex_lock(&dev->dev_mutex);
3212 
3213 	if (timer_pending(&dev->stat_monitor))
3214 		del_timer_sync(&dev->stat_monitor);
3215 
3216 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3217 	netif_stop_queue(net);
3218 	napi_disable(&dev->napi);
3219 
3220 	lan78xx_terminate_urbs(dev);
3221 
3222 	netif_info(dev, ifdown, dev->net,
3223 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3224 		   net->stats.rx_packets, net->stats.tx_packets,
3225 		   net->stats.rx_errors, net->stats.tx_errors);
3226 
3227 	/* ignore errors that occur stopping the Tx and Rx data paths */
3228 	lan78xx_stop_tx_path(dev);
3229 	lan78xx_stop_rx_path(dev);
3230 
3231 	if (net->phydev)
3232 		phy_stop(net->phydev);
3233 
3234 	usb_kill_urb(dev->urb_intr);
3235 
3236 	/* Deferred work (task, timer, softirq) must also stop.
3237 	 * We can't flush_scheduled_work() until we drop rtnl (later),
3238 	 * else workers could deadlock; so make the workers a NOP.
3239 	 */
3240 	clear_bit(EVENT_TX_HALT, &dev->flags);
3241 	clear_bit(EVENT_RX_HALT, &dev->flags);
3242 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3243 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3244 
3245 	cancel_delayed_work_sync(&dev->wq);
3246 
3247 	usb_autopm_put_interface(dev->intf);
3248 
3249 	mutex_unlock(&dev->dev_mutex);
3250 
3251 	return 0;
3252 }
3253 
3254 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3255 			       struct sk_buff_head *list, enum skb_state state)
3256 {
3257 	unsigned long flags;
3258 	enum skb_state old_state;
3259 	struct skb_data *entry = (struct skb_data *)skb->cb;
3260 
3261 	spin_lock_irqsave(&list->lock, flags);
3262 	old_state = entry->state;
3263 	entry->state = state;
3264 
3265 	__skb_unlink(skb, list);
3266 	spin_unlock(&list->lock);
3267 	spin_lock(&dev->rxq_done.lock);
3268 
3269 	__skb_queue_tail(&dev->rxq_done, skb);
3270 	if (skb_queue_len(&dev->rxq_done) == 1)
3271 		napi_schedule(&dev->napi);
3272 
3273 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3274 
3275 	return old_state;
3276 }
3277 
3278 static void tx_complete(struct urb *urb)
3279 {
3280 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3281 	struct skb_data *entry = (struct skb_data *)skb->cb;
3282 	struct lan78xx_net *dev = entry->dev;
3283 
3284 	if (urb->status == 0) {
3285 		dev->net->stats.tx_packets += entry->num_of_packet;
3286 		dev->net->stats.tx_bytes += entry->length;
3287 	} else {
3288 		dev->net->stats.tx_errors += entry->num_of_packet;
3289 
3290 		switch (urb->status) {
3291 		case -EPIPE:
3292 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3293 			break;
3294 
3295 		/* software-driven interface shutdown */
3296 		case -ECONNRESET:
3297 		case -ESHUTDOWN:
3298 			netif_dbg(dev, tx_err, dev->net,
3299 				  "tx err interface gone %d\n",
3300 				  entry->urb->status);
3301 			break;
3302 
3303 		case -EPROTO:
3304 		case -ETIME:
3305 		case -EILSEQ:
3306 			netif_stop_queue(dev->net);
3307 			netif_dbg(dev, tx_err, dev->net,
3308 				  "tx err queue stopped %d\n",
3309 				  entry->urb->status);
3310 			break;
3311 		default:
3312 			netif_dbg(dev, tx_err, dev->net,
3313 				  "unknown tx err %d\n",
3314 				  entry->urb->status);
3315 			break;
3316 		}
3317 	}
3318 
3319 	usb_autopm_put_interface_async(dev->intf);
3320 
3321 	skb_unlink(skb, &dev->txq);
3322 
3323 	lan78xx_release_tx_buf(dev, skb);
3324 
3325 	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
3326 	 */
3327 	if (skb_queue_empty(&dev->txq) &&
3328 	    !skb_queue_empty(&dev->txq_pend))
3329 		napi_schedule(&dev->napi);
3330 }
3331 
3332 static void lan78xx_queue_skb(struct sk_buff_head *list,
3333 			      struct sk_buff *newsk, enum skb_state state)
3334 {
3335 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3336 
3337 	__skb_queue_tail(list, newsk);
3338 	entry->state = state;
3339 }
3340 
3341 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3342 {
3343 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3344 }
3345 
3346 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3347 {
3348 	return dev->tx_pend_data_len;
3349 }
3350 
3351 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3352 				    struct sk_buff *skb,
3353 				    unsigned int *tx_pend_data_len)
3354 {
3355 	unsigned long flags;
3356 
3357 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3358 
3359 	__skb_queue_tail(&dev->txq_pend, skb);
3360 
3361 	dev->tx_pend_data_len += skb->len;
3362 	*tx_pend_data_len = dev->tx_pend_data_len;
3363 
3364 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3365 }
3366 
3367 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3368 					 struct sk_buff *skb,
3369 					 unsigned int *tx_pend_data_len)
3370 {
3371 	unsigned long flags;
3372 
3373 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3374 
3375 	__skb_queue_head(&dev->txq_pend, skb);
3376 
3377 	dev->tx_pend_data_len += skb->len;
3378 	*tx_pend_data_len = dev->tx_pend_data_len;
3379 
3380 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3381 }
3382 
3383 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3384 				    struct sk_buff **skb,
3385 				    unsigned int *tx_pend_data_len)
3386 {
3387 	unsigned long flags;
3388 
3389 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3390 
3391 	*skb = __skb_dequeue(&dev->txq_pend);
3392 	if (*skb)
3393 		dev->tx_pend_data_len -= (*skb)->len;
3394 	*tx_pend_data_len = dev->tx_pend_data_len;
3395 
3396 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3397 }
3398 
3399 static netdev_tx_t
3400 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3401 {
3402 	struct lan78xx_net *dev = netdev_priv(net);
3403 	unsigned int tx_pend_data_len;
3404 
3405 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3406 		schedule_delayed_work(&dev->wq, 0);
3407 
3408 	skb_tx_timestamp(skb);
3409 
3410 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3411 
3412 	/* Set up a Tx URB if none is in progress */
3413 
3414 	if (skb_queue_empty(&dev->txq))
3415 		napi_schedule(&dev->napi);
3416 
3417 	/* Stop stack Tx queue if we have enough data to fill
3418 	 * all the free Tx URBs.
3419 	 */
3420 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3421 		netif_stop_queue(net);
3422 
3423 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3424 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3425 
3426 		/* Kick off transmission of pending data */
3427 
3428 		if (!skb_queue_empty(&dev->txq_free))
3429 			napi_schedule(&dev->napi);
3430 	}
3431 
3432 	return NETDEV_TX_OK;
3433 }
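
/* Worked example (illustrative figures): if ten 32 KiB Tx URB buffers
 * sit on txq_free, lan78xx_tx_urb_space() reports 320 KiB of space.
 * Once the pending SKB data exceeds that, lan78xx_start_xmit() stops
 * the stack queue; lan78xx_tx_bh() wakes it again when the pending
 * length drops back below the available URB space.
 */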
3434 
3435 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3436 {
3437 	struct lan78xx_priv *pdata = NULL;
3438 	int ret;
3439 	int i;
3440 
3441 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3442 
3443 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3444 	if (!pdata) {
3445 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3446 		return -ENOMEM;
3447 	}
3448 
3449 	pdata->dev = dev;
3450 
3451 	spin_lock_init(&pdata->rfe_ctl_lock);
3452 	mutex_init(&pdata->dataport_mutex);
3453 
3454 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3455 
3456 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3457 		pdata->vlan_table[i] = 0;
3458 
3459 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3460 
3461 	dev->net->features = 0;
3462 
3463 	if (DEFAULT_TX_CSUM_ENABLE)
3464 		dev->net->features |= NETIF_F_HW_CSUM;
3465 
3466 	if (DEFAULT_RX_CSUM_ENABLE)
3467 		dev->net->features |= NETIF_F_RXCSUM;
3468 
3469 	if (DEFAULT_TSO_CSUM_ENABLE)
3470 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3471 
3472 	if (DEFAULT_VLAN_RX_OFFLOAD)
3473 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3474 
3475 	if (DEFAULT_VLAN_FILTER_ENABLE)
3476 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3477 
3478 	dev->net->hw_features = dev->net->features;
3479 
3480 	ret = lan78xx_setup_irq_domain(dev);
3481 	if (ret < 0) {
3482 		netdev_warn(dev->net,
3483 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3484 		goto out1;
3485 	}
3486 
3487 	/* Init all registers */
3488 	ret = lan78xx_reset(dev);
3489 	if (ret) {
3490 		netdev_warn(dev->net, "Registers INIT FAILED....");
3491 		goto out2;
3492 	}
3493 
3494 	ret = lan78xx_mdio_init(dev);
3495 	if (ret) {
3496 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3497 		goto out2;
3498 	}
3499 
3500 	dev->net->flags |= IFF_MULTICAST;
3501 
3502 	pdata->wol = WAKE_MAGIC;
3503 
3504 	return ret;
3505 
3506 out2:
3507 	lan78xx_remove_irq_domain(dev);
3508 
3509 out1:
3510 	netdev_warn(dev->net, "Bind routine FAILED");
3511 	cancel_work_sync(&pdata->set_multicast);
3512 	cancel_work_sync(&pdata->set_vlan);
3513 	kfree(pdata);
3514 	return ret;
3515 }
3516 
3517 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3518 {
3519 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3520 
3521 	lan78xx_remove_irq_domain(dev);
3522 
3523 	lan78xx_remove_mdio(dev);
3524 
3525 	if (pdata) {
3526 		cancel_work_sync(&pdata->set_multicast);
3527 		cancel_work_sync(&pdata->set_vlan);
3528 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3529 		kfree(pdata);
3530 		pdata = NULL;
3531 		dev->data[0] = 0;
3532 	}
3533 }
3534 
3535 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3536 				    struct sk_buff *skb,
3537 				    u32 rx_cmd_a, u32 rx_cmd_b)
3538 {
3539 	/* HW Checksum offload appears to be flawed if used when not stripping
3540 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3541 	 */
3542 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3543 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3544 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3545 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3546 		skb->ip_summed = CHECKSUM_NONE;
3547 	} else {
3548 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3549 		skb->ip_summed = CHECKSUM_COMPLETE;
3550 	}
3551 }
3552 
3553 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3554 				    struct sk_buff *skb,
3555 				    u32 rx_cmd_a, u32 rx_cmd_b)
3556 {
3557 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3558 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3559 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3560 				       (rx_cmd_b & 0xffff));
3561 }
3562 
3563 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3564 {
3565 	dev->net->stats.rx_packets++;
3566 	dev->net->stats.rx_bytes += skb->len;
3567 
3568 	skb->protocol = eth_type_trans(skb, dev->net);
3569 
3570 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3571 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3572 	memset(skb->cb, 0, sizeof(struct skb_data));
3573 
3574 	if (skb_defer_rx_timestamp(skb))
3575 		return;
3576 
3577 	napi_gro_receive(&dev->napi, skb);
3578 }
3579 
3580 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3581 		      int budget, int *work_done)
3582 {
3583 	if (skb->len < RX_SKB_MIN_LEN)
3584 		return 0;
3585 
3586 	/* Extract frames from the URB buffer and pass each one to
3587 	 * the stack in a new NAPI SKB.
3588 	 */
3589 	while (skb->len > 0) {
3590 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3591 		u16 rx_cmd_c;
3592 		unsigned char *packet;
3593 
3594 		rx_cmd_a = get_unaligned_le32(skb->data);
3595 		skb_pull(skb, sizeof(rx_cmd_a));
3596 
3597 		rx_cmd_b = get_unaligned_le32(skb->data);
3598 		skb_pull(skb, sizeof(rx_cmd_b));
3599 
3600 		rx_cmd_c = get_unaligned_le16(skb->data);
3601 		skb_pull(skb, sizeof(rx_cmd_c));
3602 
3603 		packet = skb->data;
3604 
3605 		/* get the packet length */
3606 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3607 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3608 
3609 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3610 			netif_dbg(dev, rx_err, dev->net,
3611 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3612 		} else {
3613 			u32 frame_len = size - ETH_FCS_LEN;
3614 			struct sk_buff *skb2;
3615 
3616 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3617 			if (!skb2)
3618 				return 0;
3619 
3620 			memcpy(skb2->data, packet, frame_len);
3621 
3622 			skb_put(skb2, frame_len);
3623 
3624 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3625 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3626 
3627 			/* Processing of the URB buffer must complete once
3628 			 * it has started. If the NAPI work budget is exhausted
3629 			 * while frames remain, they are added to the overflow
3630 			 * queue for delivery in the next NAPI polling cycle.
3631 			 */
3632 			if (*work_done < budget) {
3633 				lan78xx_skb_return(dev, skb2);
3634 				++(*work_done);
3635 			} else {
3636 				skb_queue_tail(&dev->rxq_overflow, skb2);
3637 			}
3638 		}
3639 
3640 		skb_pull(skb, size);
3641 
3642 		/* skip padding bytes before the next frame starts */
3643 		if (skb->len)
3644 			skb_pull(skb, align_count);
3645 	}
3646 
3647 	return 1;
3648 }
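
/* Worked example (illustrative): for a 60-byte frame (size = 60), and
 * assuming RXW_PADDING is 2, the alignment math above gives
 * align_count = (4 - ((60 + 2) % 4)) % 4 = 2, so two padding bytes are
 * skipped before the next RX command word in the URB buffer.
 */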
3649 
3650 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3651 			      int budget, int *work_done)
3652 {
3653 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3654 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3655 		dev->net->stats.rx_errors++;
3656 	}
3657 }
3658 
3659 static void rx_complete(struct urb *urb)
3660 {
3661 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3662 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3663 	struct lan78xx_net *dev = entry->dev;
3664 	int urb_status = urb->status;
3665 	enum skb_state state;
3666 
3667 	netif_dbg(dev, rx_status, dev->net,
3668 		  "rx done: status %d", urb->status);
3669 
3670 	skb_put(skb, urb->actual_length);
3671 	state = rx_done;
3672 
3673 	if (urb != entry->urb)
3674 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
3675 
3676 	switch (urb_status) {
3677 	case 0:
3678 		if (skb->len < RX_SKB_MIN_LEN) {
3679 			state = rx_cleanup;
3680 			dev->net->stats.rx_errors++;
3681 			dev->net->stats.rx_length_errors++;
3682 			netif_dbg(dev, rx_err, dev->net,
3683 				  "rx length %d\n", skb->len);
3684 		}
3685 		usb_mark_last_busy(dev->udev);
3686 		break;
3687 	case -EPIPE:
3688 		dev->net->stats.rx_errors++;
3689 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3690 		fallthrough;
3691 	case -ECONNRESET:				/* async unlink */
3692 	case -ESHUTDOWN:				/* hardware gone */
3693 		netif_dbg(dev, ifdown, dev->net,
3694 			  "rx shutdown, code %d\n", urb_status);
3695 		state = rx_cleanup;
3696 		break;
3697 	case -EPROTO:
3698 	case -ETIME:
3699 	case -EILSEQ:
3700 		dev->net->stats.rx_errors++;
3701 		state = rx_cleanup;
3702 		break;
3703 
3704 	/* data overrun ... flush fifo? */
3705 	case -EOVERFLOW:
3706 		dev->net->stats.rx_over_errors++;
3707 		fallthrough;
3708 
3709 	default:
3710 		state = rx_cleanup;
3711 		dev->net->stats.rx_errors++;
3712 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3713 		break;
3714 	}
3715 
3716 	state = defer_bh(dev, skb, &dev->rxq, state);
3717 }
3718 
3719 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3720 {
3721 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3722 	size_t size = dev->rx_urb_size;
3723 	struct urb *urb = entry->urb;
3724 	unsigned long lockflags;
3725 	int ret = 0;
3726 
3727 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3728 			  skb->data, size, rx_complete, skb);
3729 
3730 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3731 
3732 	if (netif_device_present(dev->net) &&
3733 	    netif_running(dev->net) &&
3734 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3735 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3736 		ret = usb_submit_urb(urb, flags);
3737 		switch (ret) {
3738 		case 0:
3739 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3740 			break;
3741 		case -EPIPE:
3742 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3743 			break;
3744 		case -ENODEV:
3745 		case -ENOENT:
3746 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3747 			netif_device_detach(dev->net);
3748 			break;
3749 		case -EHOSTUNREACH:
3750 			ret = -ENOLINK;
3751 			napi_schedule(&dev->napi);
3752 			break;
3753 		default:
3754 			netif_dbg(dev, rx_err, dev->net,
3755 				  "rx submit, %d\n", ret);
3756 			napi_schedule(&dev->napi);
3757 			break;
3758 		}
3759 	} else {
3760 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3761 		ret = -ENOLINK;
3762 	}
3763 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3764 
3765 	if (ret)
3766 		lan78xx_release_rx_buf(dev, skb);
3767 
3768 	return ret;
3769 }
3770 
3771 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3772 {
3773 	struct sk_buff *rx_buf;
3774 
3775 	/* Ensure the maximum number of Rx URBs is submitted
3776 	 */
3777 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3778 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3779 			break;
3780 	}
3781 }
3782 
3783 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3784 				    struct sk_buff *rx_buf)
3785 {
3786 	/* reset SKB data pointers */
3787 
3788 	rx_buf->data = rx_buf->head;
3789 	skb_reset_tail_pointer(rx_buf);
3790 	rx_buf->len = 0;
3791 	rx_buf->data_len = 0;
3792 
3793 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3794 }
3795 
3796 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3797 {
3798 	u32 tx_cmd_a;
3799 	u32 tx_cmd_b;
3800 
3801 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3802 
3803 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3804 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3805 
3806 	tx_cmd_b = 0;
3807 	if (skb_is_gso(skb)) {
3808 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3809 
3810 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3811 
3812 		tx_cmd_a |= TX_CMD_A_LSO_;
3813 	}
3814 
3815 	if (skb_vlan_tag_present(skb)) {
3816 		tx_cmd_a |= TX_CMD_A_IVTG_;
3817 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3818 	}
3819 
3820 	put_unaligned_le32(tx_cmd_a, buffer);
3821 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3822 }
3823 
3824 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3825 					    struct sk_buff *tx_buf)
3826 {
3827 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3828 	int remain = dev->tx_urb_size;
3829 	u8 *tx_data = tx_buf->data;
3830 	u32 urb_len = 0;
3831 
3832 	entry->num_of_packet = 0;
3833 	entry->length = 0;
3834 
3835 	/* Work through the pending SKBs and copy the data of each SKB into
3836 	 * the URB buffer if there is room for all the SKB data.
3837 	 *
3838 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
3839 	 */
3840 	while (remain >= TX_SKB_MIN_LEN) {
3841 		unsigned int pending_bytes;
3842 		unsigned int align_bytes;
3843 		struct sk_buff *skb;
3844 		unsigned int len;
3845 
3846 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3847 
3848 		if (!skb)
3849 			break;
3850 
3851 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3852 			      TX_ALIGNMENT;
3853 		len = align_bytes + TX_CMD_LEN + skb->len;
3854 		if (len > remain) {
3855 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3856 			break;
3857 		}
3858 
3859 		tx_data += align_bytes;
3860 
3861 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3862 		tx_data += TX_CMD_LEN;
3863 
3864 		len = skb->len;
3865 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3866 			struct net_device_stats *stats = &dev->net->stats;
3867 
3868 			stats->tx_dropped++;
3869 			dev_kfree_skb_any(skb);
3870 			tx_data -= TX_CMD_LEN;
3871 			continue;
3872 		}
3873 
3874 		tx_data += len;
3875 		entry->length += len;
3876 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3877 
3878 		dev_kfree_skb_any(skb);
3879 
3880 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3881 
3882 		remain = dev->tx_urb_size - urb_len;
3883 	}
3884 
3885 	skb_put(tx_buf, urb_len);
3886 
3887 	return entry;
3888 }
3889 
3890 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3891 {
3892 	int ret;
3893 
3894 	/* Start the stack Tx queue if it was stopped
3895 	 */
3896 	netif_tx_lock(dev->net);
3897 	if (netif_queue_stopped(dev->net)) {
3898 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3899 			netif_wake_queue(dev->net);
3900 	}
3901 	netif_tx_unlock(dev->net);
3902 
3903 	/* Go through the Tx pending queue and set up URBs to transfer
3904 	 * the data to the device. Stop if no more pending data or URBs,
3905 	 * or if an error occurs when a URB is submitted.
3906 	 */
3907 	do {
3908 		struct skb_data *entry;
3909 		struct sk_buff *tx_buf;
3910 		unsigned long flags;
3911 
3912 		if (skb_queue_empty(&dev->txq_pend))
3913 			break;
3914 
3915 		tx_buf = lan78xx_get_tx_buf(dev);
3916 		if (!tx_buf)
3917 			break;
3918 
3919 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3920 
3921 		spin_lock_irqsave(&dev->txq.lock, flags);
3922 		ret = usb_autopm_get_interface_async(dev->intf);
3923 		if (ret < 0) {
3924 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3925 			goto out;
3926 		}
3927 
3928 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
3929 				  tx_buf->data, tx_buf->len, tx_complete,
3930 				  tx_buf);
3931 
3932 		if (tx_buf->len % dev->maxpacket == 0) {
3933 			/* send URB_ZERO_PACKET */
3934 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
3935 		}
3936 
3937 #ifdef CONFIG_PM
3938 		/* if device is asleep, stop outgoing packet processing */
3939 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3940 			usb_anchor_urb(entry->urb, &dev->deferred);
3941 			netif_stop_queue(dev->net);
3942 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3943 			netdev_dbg(dev->net,
3944 				   "Delaying transmission for resumption\n");
3945 			return;
3946 		}
3947 #endif
3948 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
3949 		switch (ret) {
3950 		case 0:
3951 			netif_trans_update(dev->net);
3952 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
3953 			break;
3954 		case -EPIPE:
3955 			netif_stop_queue(dev->net);
3956 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3957 			usb_autopm_put_interface_async(dev->intf);
3958 			break;
3959 		case -ENODEV:
3960 		case -ENOENT:
3961 			netif_dbg(dev, tx_err, dev->net,
3962 				  "tx submit urb err %d (disconnected?)\n", ret);
3963 			netif_device_detach(dev->net);
3964 			break;
3965 		default:
3966 			usb_autopm_put_interface_async(dev->intf);
3967 			netif_dbg(dev, tx_err, dev->net,
3968 				  "tx submit urb err %d\n", ret);
3969 			break;
3970 		}
3971 
3972 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3973 
3974 		if (ret) {
3975 			netdev_warn(dev->net, "failed to submit tx urb: %d\n", ret);
3976 out:
3977 			dev->net->stats.tx_dropped += entry->num_of_packet;
3978 			lan78xx_release_tx_buf(dev, tx_buf);
3979 		}
3980 	} while (ret == 0);
3981 }
3982 
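/* NAPI bottom half. Ordering matters: frames that overflowed the
 * budget in the previous cycle (rxq_overflow) are delivered first,
 * completed Rx URBs are then processed from a snapshot of rxq_done,
 * and finally free Rx URBs are resubmitted and Tx URBs dispatched.
 * Whatever is left of the snapshot when the budget runs out is
 * spliced back onto the front of rxq_done for the next cycle.
 */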
3983 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
3984 {
3985 	struct sk_buff_head done;
3986 	struct sk_buff *rx_buf;
3987 	struct skb_data *entry;
3988 	unsigned long flags;
3989 	int work_done = 0;
3990 
3991 	/* Pass frames received in the last NAPI cycle before
3992 	 * working on newly completed URBs.
3993 	 */
3994 	while (!skb_queue_empty(&dev->rxq_overflow)) {
3995 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
3996 		++work_done;
3997 	}
3998 
3999 	/* Take a snapshot of the done queue and move items to a
4000 	 * temporary queue. Rx URB completions will continue to add
4001 	 * to the done queue.
4002 	 */
4003 	__skb_queue_head_init(&done);
4004 
4005 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4006 	skb_queue_splice_init(&dev->rxq_done, &done);
4007 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4008 
4009 	/* Extract receive frames from completed URBs and
4010 	 * pass them to the stack. Re-submit each completed URB.
4011 	 */
4012 	while ((work_done < budget) &&
4013 	       (rx_buf = __skb_dequeue(&done))) {
4014 		entry = (struct skb_data *)(rx_buf->cb);
4015 		switch (entry->state) {
4016 		case rx_done:
4017 			rx_process(dev, rx_buf, budget, &work_done);
4018 			break;
4019 		case rx_cleanup:
4020 			break;
4021 		default:
4022 			netdev_dbg(dev->net, "rx buf state %d\n",
4023 				   entry->state);
4024 			break;
4025 		}
4026 
4027 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4028 	}
4029 
4030 	/* If budget was consumed before processing all the URBs, put them
4031 	 * back on the front of the done queue. They will be first to be
4032 	 * processed in the next NAPI cycle.
4033 	 */
4034 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4035 	skb_queue_splice(&done, &dev->rxq_done);
4036 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4037 
4038 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4039 		/* reset update timer delta */
4040 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4041 			dev->delta = 1;
4042 			mod_timer(&dev->stat_monitor,
4043 				  jiffies + STAT_UPDATE_TIMER);
4044 		}
4045 
4046 		/* Submit all free Rx URBs */
4047 
4048 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4049 			lan78xx_rx_urb_submit_all(dev);
4050 
4051 		/* Submit new Tx URBs */
4052 
4053 		lan78xx_tx_bh(dev);
4054 	}
4055 
4056 	return work_done;
4057 }
4058 
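/* NAPI poll callback. Returning the full budget keeps the poll
 * scheduled; otherwise the cycle is completed with
 * napi_complete_done() and polling is explicitly rescheduled if
 * completed Rx URBs are already waiting or Tx data is still pending.
 */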
4059 static int lan78xx_poll(struct napi_struct *napi, int budget)
4060 {
4061 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4062 	int result = budget;
4063 	int work_done;
4064 
4065 	/* Don't do any work if the device is suspended */
4066 
4067 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4068 		napi_complete_done(napi, 0);
4069 		return 0;
4070 	}
4071 
4072 	/* Process completed URBs and submit new URBs */
4073 
4074 	work_done = lan78xx_bh(dev, budget);
4075 
4076 	if (work_done < budget) {
4077 		napi_complete_done(napi, work_done);
4078 
4079 		/* Start a new polling cycle if data was received or
4080 		 * data is waiting to be transmitted.
4081 		 */
4082 		if (!skb_queue_empty(&dev->rxq_done)) {
4083 			napi_schedule(napi);
4084 		} else if (netif_carrier_ok(dev->net)) {
4085 			if (skb_queue_empty(&dev->txq) &&
4086 			    !skb_queue_empty(&dev->txq_pend)) {
4087 				napi_schedule(napi);
4088 			} else {
4089 				netif_tx_lock(dev->net);
4090 				if (netif_queue_stopped(dev->net)) {
4091 					netif_wake_queue(dev->net);
4092 					napi_schedule(napi);
4093 				}
4094 				netif_tx_unlock(dev->net);
4095 			}
4096 		}
4097 		result = work_done;
4098 	}
4099 
4100 	return result;
4101 }
4102 
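/* Deferred-event worker, scheduled via lan78xx_defer_kevent(). Handles
 * work that cannot run in URB completion context: clearing halted bulk
 * endpoints, link resets and statistics updates. Note the stat_monitor
 * back-off below: delta doubles on each pass, capped at 50 intervals,
 * so a quiet device is polled progressively less often.
 */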
4103 static void lan78xx_delayedwork(struct work_struct *work)
4104 {
4105 	int status;
4106 	struct lan78xx_net *dev;
4107 
4108 	dev = container_of(work, struct lan78xx_net, wq.work);
4109 
4110 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4111 		return;
4112 
4113 	if (usb_autopm_get_interface(dev->intf) < 0)
4114 		return;
4115 
4116 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4117 		unlink_urbs(dev, &dev->txq);
4118 
4119 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4120 		if (status < 0 &&
4121 		    status != -EPIPE &&
4122 		    status != -ESHUTDOWN) {
4123 			if (netif_msg_tx_err(dev))
4124 				netdev_err(dev->net,
4125 					   "can't clear tx halt, status %d\n",
4126 					   status);
4127 		} else {
4128 			clear_bit(EVENT_TX_HALT, &dev->flags);
4129 			if (status != -ESHUTDOWN)
4130 				netif_wake_queue(dev->net);
4131 		}
4132 	}
4133 
4134 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4135 		unlink_urbs(dev, &dev->rxq);
4136 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4137 		if (status < 0 &&
4138 		    status != -EPIPE &&
4139 		    status != -ESHUTDOWN) {
4140 			if (netif_msg_rx_err(dev))
4141 				netdev_err(dev->net,
4142 					   "can't clear rx halt, status %d\n",
4143 					   status);
4144 		} else {
4145 			clear_bit(EVENT_RX_HALT, &dev->flags);
4146 			napi_schedule(&dev->napi);
4147 		}
4148 	}
4149 
4150 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4151 		int ret;
4152 
4153 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4154 		ret = lan78xx_link_reset(dev);
4155 		if (ret < 0)
4156 			netdev_info(dev->net, "link reset failed (%d)\n",
4157 				    ret);
4158 	}
4159 
4160 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4161 		lan78xx_update_stats(dev);
4162 
4163 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4164 
4165 		mod_timer(&dev->stat_monitor,
4166 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4167 
4168 		dev->delta = min((dev->delta * 2), 50);
4169 	}
4170 
4171 	usb_autopm_put_interface(dev->intf);
4172 }
4173 
4174 static void intr_complete(struct urb *urb)
4175 {
4176 	struct lan78xx_net *dev = urb->context;
4177 	int status = urb->status;
4178 
4179 	switch (status) {
4180 	/* success */
4181 	case 0:
4182 		lan78xx_status(dev, urb);
4183 		break;
4184 
4185 	/* software-driven interface shutdown */
4186 	case -ENOENT:			/* urb killed */
4187 	case -ENODEV:			/* hardware gone */
4188 	case -ESHUTDOWN:		/* hardware gone */
4189 		netif_dbg(dev, ifdown, dev->net,
4190 			  "intr shutdown, code %d\n", status);
4191 		return;
4192 
4193 	/* NOTE:  not throttling like RX/TX, since this endpoint
4194 	 * already polls infrequently
4195 	 */
4196 	default:
4197 		netdev_dbg(dev->net, "intr status %d\n", status);
4198 		break;
4199 	}
4200 
4201 	if (!netif_device_present(dev->net) ||
4202 	    !netif_running(dev->net)) {
4203 		netdev_warn(dev->net, "not submitting new status URB\n");
4204 		return;
4205 	}
4206 
4207 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4208 	status = usb_submit_urb(urb, GFP_ATOMIC);
4209 
4210 	switch (status) {
4211 	case 0:
4212 		break;
4213 	case -ENODEV:
4214 	case -ENOENT:
4215 		netif_dbg(dev, timer, dev->net,
4216 			  "intr resubmit %d (disconnect?)\n", status);
4217 		netif_device_detach(dev->net);
4218 		break;
4219 	default:
4220 		netif_err(dev, timer, dev->net,
4221 			  "intr resubmit --> %d\n", status);
4222 		break;
4223 	}
4224 }
4225 
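/* USB disconnect handler. Teardown is roughly probe in reverse: flag
 * EVENT_DEV_DISCONNECT first so the deferred worker bails out early,
 * remove NAPI and unregister the netdev, detach the PHY (including a
 * possible fixed link), scuttle any URBs deferred for resume, and
 * release the Tx/Rx URB pools before dropping the usb_device ref.
 */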
4226 static void lan78xx_disconnect(struct usb_interface *intf)
4227 {
4228 	struct lan78xx_net *dev;
4229 	struct usb_device *udev;
4230 	struct net_device *net;
4231 	struct phy_device *phydev;
4232 
4233 	dev = usb_get_intfdata(intf);
4234 	usb_set_intfdata(intf, NULL);
4235 	if (!dev)
4236 		return;
4237 
4238 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4239 
4240 	netif_napi_del(&dev->napi);
4241 
4242 	udev = interface_to_usbdev(intf);
4243 	net = dev->net;
4244 
4245 	unregister_netdev(net);
4246 
4247 	cancel_delayed_work_sync(&dev->wq);
4248 
4249 	phydev = net->phydev;
4250 
4251 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4252 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4253 
4254 	phy_disconnect(phydev);
4255 
4256 	if (phy_is_pseudo_fixed_link(phydev))
4257 		fixed_phy_unregister(phydev);
4258 
4259 	usb_scuttle_anchored_urbs(&dev->deferred);
4260 
4261 	if (timer_pending(&dev->stat_monitor))
4262 		del_timer_sync(&dev->stat_monitor);
4263 
4264 	lan78xx_unbind(dev, intf);
4265 
4266 	lan78xx_free_tx_resources(dev);
4267 	lan78xx_free_rx_resources(dev);
4268 
4269 	usb_kill_urb(dev->urb_intr);
4270 	usb_free_urb(dev->urb_intr);
4271 
4272 	free_netdev(net);
4273 	usb_put_dev(udev);
4274 }
4275 
4276 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4277 {
4278 	struct lan78xx_net *dev = netdev_priv(net);
4279 
4280 	unlink_urbs(dev, &dev->txq);
4281 	napi_schedule(&dev->napi);
4282 }
4283 
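/* Drop GSO for SKBs too large to fit a Tx URB; LAN78XX_TSO_SIZE()
 * presumably reflects that per-URB payload limit. The generic
 * VLAN/VXLAN feature checks are applied on top.
 */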
4284 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4285 						struct net_device *netdev,
4286 						netdev_features_t features)
4287 {
4288 	struct lan78xx_net *dev = netdev_priv(netdev);
4289 
4290 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4291 		features &= ~NETIF_F_GSO_MASK;
4292 
4293 	features = vlan_features_check(skb, features);
4294 	features = vxlan_features_check(skb, features);
4295 
4296 	return features;
4297 }
4298 
4299 static const struct net_device_ops lan78xx_netdev_ops = {
4300 	.ndo_open		= lan78xx_open,
4301 	.ndo_stop		= lan78xx_stop,
4302 	.ndo_start_xmit		= lan78xx_start_xmit,
4303 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4304 	.ndo_change_mtu		= lan78xx_change_mtu,
4305 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4306 	.ndo_validate_addr	= eth_validate_addr,
4307 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4308 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4309 	.ndo_set_features	= lan78xx_set_features,
4310 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4311 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4312 	.ndo_features_check	= lan78xx_features_check,
4313 };
4314 
4315 static void lan78xx_stat_monitor(struct timer_list *t)
4316 {
4317 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4318 
4319 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4320 }
4321 
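/* Probe: allocate the netdev and private state, size the URB pools
 * (lan78xx_urb_config_init(), presumably per negotiated USB speed),
 * verify the interface really exposes bulk-in, bulk-out and
 * interrupt-in endpoints before using them, then bind, set up the
 * interrupt URB and register the netdev. The out1..out8 labels unwind
 * in reverse order of acquisition.
 */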
4322 static int lan78xx_probe(struct usb_interface *intf,
4323 			 const struct usb_device_id *id)
4324 {
4325 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4326 	struct lan78xx_net *dev;
4327 	struct net_device *netdev;
4328 	struct usb_device *udev;
4329 	int ret;
4330 	unsigned int maxp;
4331 	unsigned int period;
4332 	u8 *buf = NULL;
4333 
4334 	udev = interface_to_usbdev(intf);
4335 	udev = usb_get_dev(udev);
4336 
4337 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4338 	if (!netdev) {
4339 		dev_err(&intf->dev, "Error: OOM\n");
4340 		ret = -ENOMEM;
4341 		goto out1;
4342 	}
4343 
4344 	/* netdev_printk() needs this */
4345 	SET_NETDEV_DEV(netdev, &intf->dev);
4346 
4347 	dev = netdev_priv(netdev);
4348 	dev->udev = udev;
4349 	dev->intf = intf;
4350 	dev->net = netdev;
4351 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4352 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4353 
4354 	skb_queue_head_init(&dev->rxq);
4355 	skb_queue_head_init(&dev->txq);
4356 	skb_queue_head_init(&dev->rxq_done);
4357 	skb_queue_head_init(&dev->txq_pend);
4358 	skb_queue_head_init(&dev->rxq_overflow);
4359 	mutex_init(&dev->phy_mutex);
4360 	mutex_init(&dev->dev_mutex);
4361 
4362 	ret = lan78xx_urb_config_init(dev);
4363 	if (ret < 0)
4364 		goto out2;
4365 
4366 	ret = lan78xx_alloc_tx_resources(dev);
4367 	if (ret < 0)
4368 		goto out2;
4369 
4370 	ret = lan78xx_alloc_rx_resources(dev);
4371 	if (ret < 0)
4372 		goto out3;
4373 
4374 	/* MTU range: 68 - 9000 */
4375 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4376 
4377 	netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4378 
4379 	netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
4380 
4381 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4382 	init_usb_anchor(&dev->deferred);
4383 
4384 	netdev->netdev_ops = &lan78xx_netdev_ops;
4385 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4386 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4387 
4388 	dev->delta = 1;
4389 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4390 
4391 	mutex_init(&dev->stats.access_lock);
4392 
4393 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4394 		ret = -ENODEV;
4395 		goto out4;
4396 	}
4397 
4398 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4399 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4400 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4401 		ret = -ENODEV;
4402 		goto out4;
4403 	}
4404 
4405 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4406 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4407 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4408 		ret = -ENODEV;
4409 		goto out4;
4410 	}
4411 
4412 	ep_intr = &intf->cur_altsetting->endpoint[2];
4413 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4414 		ret = -ENODEV;
4415 		goto out4;
4416 	}
4417 
4418 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4419 					usb_endpoint_num(&ep_intr->desc));
4420 
4421 	ret = lan78xx_bind(dev, intf);
4422 	if (ret < 0)
4423 		goto out4;
4424 
4425 	period = ep_intr->desc.bInterval;
4426 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
4427 	buf = kmalloc(maxp, GFP_KERNEL);
4428 	if (!buf) {
4429 		ret = -ENOMEM;
4430 		goto out5;
4431 	}
4432 
4433 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4434 	if (!dev->urb_intr) {
4435 		ret = -ENOMEM;
4436 		goto out6;
4437 	} else {
4438 		usb_fill_int_urb(dev->urb_intr, dev->udev,
4439 				 dev->pipe_intr, buf, maxp,
4440 				 intr_complete, dev, period);
4441 		dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4442 	}
4443 
4444 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4445 
4446 	/* Reject broken descriptors. */
4447 	if (dev->maxpacket == 0) {
4448 		ret = -ENODEV;
4449 		goto out6;
4450 	}
4451 
4452 	/* driver requires remote-wakeup capability during autosuspend. */
4453 	intf->needs_remote_wakeup = 1;
4454 
4455 	ret = lan78xx_phy_init(dev);
4456 	if (ret < 0)
4457 		goto out7;
4458 
4459 	ret = register_netdev(netdev);
4460 	if (ret != 0) {
4461 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4462 		goto out8;
4463 	}
4464 
4465 	usb_set_intfdata(intf, dev);
4466 
4467 	ret = device_set_wakeup_enable(&udev->dev, true);
4468 
4469 	/* The default autosuspend delay of 2 seconds has more overhead
4470 	 * than benefit; use 10 seconds instead.
4471 	 */
4472 	pm_runtime_set_autosuspend_delay(&udev->dev,
4473 					 DEFAULT_AUTOSUSPEND_DELAY);
4474 
4475 	return 0;
4476 
4477 out8:
4478 	phy_disconnect(netdev->phydev);
4479 out7:
4480 	usb_free_urb(dev->urb_intr);
4481 out6:
4482 	kfree(buf);
4483 out5:
4484 	lan78xx_unbind(dev, intf);
4485 out4:
4486 	netif_napi_del(&dev->napi);
4487 	lan78xx_free_rx_resources(dev);
4488 out3:
4489 	lan78xx_free_tx_resources(dev);
4490 out2:
4491 	free_netdev(netdev);
4492 out1:
4493 	usb_put_dev(udev);
4494 
4495 	return ret;
4496 }
4497 
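/* Bit-serial CRC-16 over a wakeup-frame prefix: polynomial 0x8005,
 * seed 0xFFFF, each byte consumed LSB first. Presumably this matches
 * the CRC the MAC computes when matching received frames against a
 * WUF_CFGx filter; only the bytes selected by the WUF_MASKx bitmap
 * contribute, e.g. the IPv6 multicast filter below hashes just the
 * two-byte prefix 33:33.
 */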
4498 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4499 {
4500 	const u16 crc16poly = 0x8005;
4501 	int i;
4502 	u16 bit, crc, msb;
4503 	u8 data;
4504 
4505 	crc = 0xFFFF;
4506 	for (i = 0; i < len; i++) {
4507 		data = *buf++;
4508 		for (bit = 0; bit < 8; bit++) {
4509 			msb = crc >> 15;
4510 			crc <<= 1;
4511 
4512 			if (msb ^ (u16)(data & 1)) {
4513 				crc ^= crc16poly;
4514 				crc |= (u16)0x0001U;
4515 			}
4516 			data >>= 1;
4517 		}
4518 	}
4519 
4520 	return crc;
4521 }
4522 
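/* Configure the MAC for USB autosuspend (selective suspend): stop both
 * data paths, arm "good frame" wakeup (WUCSR_RFE_WAKE_EN_ plus
 * WUCSR_STORE_WAKE_ so the waking frame is kept rather than dropped),
 * select suspend mode 3 and clear stale wakeup status (PMT_CTL_WUPS_
 * looks write-one-to-clear, judging by the read-modify-write here),
 * then restart Rx so the wakeup logic sees traffic.
 */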
4523 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4524 {
4525 	u32 buf;
4526 	int ret;
4527 
4528 	ret = lan78xx_stop_tx_path(dev);
4529 	if (ret < 0)
4530 		return ret;
4531 
4532 	ret = lan78xx_stop_rx_path(dev);
4533 	if (ret < 0)
4534 		return ret;
4535 
4536 	/* auto suspend (selective suspend) */
4537 
4538 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4539 	if (ret < 0)
4540 		return ret;
4541 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4542 	if (ret < 0)
4543 		return ret;
4544 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4545 	if (ret < 0)
4546 		return ret;
4547 
4548 	/* set goodframe wakeup */
4549 
4550 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4551 	if (ret < 0)
4552 		return ret;
4553 
4554 	buf |= WUCSR_RFE_WAKE_EN_;
4555 	buf |= WUCSR_STORE_WAKE_;
4556 
4557 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4558 	if (ret < 0)
4559 		return ret;
4560 
4561 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4562 	if (ret < 0)
4563 		return ret;
4564 
4565 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4566 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4567 	buf |= PMT_CTL_PHY_WAKE_EN_;
4568 	buf |= PMT_CTL_WOL_EN_;
4569 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4570 	buf |= PMT_CTL_SUS_MODE_3_;
4571 
4572 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4573 	if (ret < 0)
4574 		return ret;
4575 
4576 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4577 	if (ret < 0)
4578 		return ret;
4579 
4580 	buf |= PMT_CTL_WUPS_MASK_;
4581 
4582 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4583 	if (ret < 0)
4584 		return ret;
4585 
4586 	ret = lan78xx_start_rx_path(dev);
4587 
4588 	return ret;
4589 }
4590 
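/* Program wake-on-LAN for system suspend. Each requested WAKE_* source
 * sets a WUCSR enable and/or one of the NUM_OF_WUF_CFG wakeup-frame
 * filters. A filter matches on a CRC16 of selected frame bytes; the
 * WUF_MASKx bitmap appears to select byte offsets, so mask 7 covers
 * the 3-byte IPv4 multicast prefix 01:00:5e, mask 3 the 2-byte IPv6
 * prefix 33:33, and mask 0x3000 bytes 12-13 (the EtherType) for ARP.
 */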
4591 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4592 {
4593 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4594 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4595 	const u8 arp_type[2] = { 0x08, 0x06 };
4596 	u32 temp_pmt_ctl;
4597 	int mask_index;
4598 	u32 temp_wucsr;
4599 	u32 buf;
4600 	u16 crc;
4601 	int ret;
4602 
4603 	ret = lan78xx_stop_tx_path(dev);
4604 	if (ret < 0)
4605 		return ret;
4606 	ret = lan78xx_stop_rx_path(dev);
4607 	if (ret < 0)
4608 		return ret;
4609 
4610 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4611 	if (ret < 0)
4612 		return ret;
4613 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4614 	if (ret < 0)
4615 		return ret;
4616 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4617 	if (ret < 0)
4618 		return ret;
4619 
4620 	temp_wucsr = 0;
4621 
4622 	temp_pmt_ctl = 0;
4623 
4624 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4625 	if (ret < 0)
4626 		return ret;
4627 
4628 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4629 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4630 
4631 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4632 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4633 		if (ret < 0)
4634 			return ret;
4635 	}
4636 
4637 	mask_index = 0;
4638 	if (wol & WAKE_PHY) {
4639 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4640 
4641 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4642 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4643 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4644 	}
4645 	if (wol & WAKE_MAGIC) {
4646 		temp_wucsr |= WUCSR_MPEN_;
4647 
4648 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4649 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4650 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4651 	}
4652 	if (wol & WAKE_BCAST) {
4653 		temp_wucsr |= WUCSR_BCST_EN_;
4654 
4655 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4656 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4657 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4658 	}
4659 	if (wol & WAKE_MCAST) {
4660 		temp_wucsr |= WUCSR_WAKE_EN_;
4661 
4662 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4663 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4664 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4665 					WUF_CFGX_EN_ |
4666 					WUF_CFGX_TYPE_MCAST_ |
4667 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4668 					(crc & WUF_CFGX_CRC16_MASK_));
4669 		if (ret < 0)
4670 			return ret;
4671 
4672 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4673 		if (ret < 0)
4674 			return ret;
4675 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4676 		if (ret < 0)
4677 			return ret;
4678 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4679 		if (ret < 0)
4680 			return ret;
4681 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4682 		if (ret < 0)
4683 			return ret;
4684 
4685 		mask_index++;
4686 
4687 		/* for IPv6 Multicast */
4688 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4689 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4690 					WUF_CFGX_EN_ |
4691 					WUF_CFGX_TYPE_MCAST_ |
4692 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4693 					(crc & WUF_CFGX_CRC16_MASK_));
4694 		if (ret < 0)
4695 			return ret;
4696 
4697 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4698 		if (ret < 0)
4699 			return ret;
4700 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4701 		if (ret < 0)
4702 			return ret;
4703 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4704 		if (ret < 0)
4705 			return ret;
4706 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4707 		if (ret < 0)
4708 			return ret;
4709 
4710 		mask_index++;
4711 
4712 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4713 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4714 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4715 	}
4716 	if (wol & WAKE_UCAST) {
4717 		temp_wucsr |= WUCSR_PFDA_EN_;
4718 
4719 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4720 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4721 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4722 	}
4723 	if (wol & WAKE_ARP) {
4724 		temp_wucsr |= WUCSR_WAKE_EN_;
4725 
4726 		/* set WUF_CFG & WUF_MASK
4727 		 * for packet type (EtherType, offset 12,13) = ARP (0x0806)
4728 		 */
4729 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4730 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4731 					WUF_CFGX_EN_ |
4732 					WUF_CFGX_TYPE_ALL_ |
4733 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4734 					(crc & WUF_CFGX_CRC16_MASK_));
4735 		if (ret < 0)
4736 			return ret;
4737 
4738 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4739 		if (ret < 0)
4740 			return ret;
4741 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4742 		if (ret < 0)
4743 			return ret;
4744 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4745 		if (ret < 0)
4746 			return ret;
4747 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4748 		if (ret < 0)
4749 			return ret;
4750 
4751 		mask_index++;
4752 
4753 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4754 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4755 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4756 	}
4757 
4758 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4759 	if (ret < 0)
4760 		return ret;
4761 
4762 	/* when multiple WOL bits are set */
4763 	if (hweight_long((unsigned long)wol) > 1) {
4764 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4765 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4766 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4767 	}
4768 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4769 	if (ret < 0)
4770 		return ret;
4771 
4772 	/* clear WUPS */
4773 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4774 	if (ret < 0)
4775 		return ret;
4776 
4777 	buf |= PMT_CTL_WUPS_MASK_;
4778 
4779 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4780 	if (ret < 0)
4781 		return ret;
4782 
4783 	ret = lan78xx_start_rx_path(dev);
4784 
4785 	return ret;
4786 }
4787 
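/* Suspend entry point for both runtime (auto) and system suspend.
 * Autosuspend is refused with -EBUSY while Tx is in flight. Otherwise
 * the data paths are quiesced and outstanding URBs killed; autosuspend
 * then arms good-frame wakeup, system suspend arms the WOL sources the
 * user configured, and a closed interface enters suspend mode 3 with
 * every wakeup source disabled.
 */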
4788 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4789 {
4790 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4791 	bool dev_open;
4792 	int ret;
4793 
4794 	mutex_lock(&dev->dev_mutex);
4795 
4796 	netif_dbg(dev, ifdown, dev->net,
4797 		  "suspending: pm event %#x\n", message.event);
4798 
4799 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4800 
4801 	if (dev_open) {
4802 		spin_lock_irq(&dev->txq.lock);
4803 		/* don't autosuspend while transmitting */
4804 		if ((skb_queue_len(&dev->txq) ||
4805 		     skb_queue_len(&dev->txq_pend)) &&
4806 		    PMSG_IS_AUTO(message)) {
4807 			spin_unlock_irq(&dev->txq.lock);
4808 			ret = -EBUSY;
4809 			goto out;
4810 		} else {
4811 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4812 			spin_unlock_irq(&dev->txq.lock);
4813 		}
4814 
4815 		/* stop RX */
4816 		ret = lan78xx_stop_rx_path(dev);
4817 		if (ret < 0)
4818 			goto out;
4819 
4820 		ret = lan78xx_flush_rx_fifo(dev);
4821 		if (ret < 0)
4822 			goto out;
4823 
4824 		/* stop Tx */
4825 		ret = lan78xx_stop_tx_path(dev);
4826 		if (ret < 0)
4827 			goto out;
4828 
4829 		/* empty out the Rx and Tx queues */
4830 		netif_device_detach(dev->net);
4831 		lan78xx_terminate_urbs(dev);
4832 		usb_kill_urb(dev->urb_intr);
4833 
4834 		/* reattach */
4835 		netif_device_attach(dev->net);
4836 
4837 		del_timer(&dev->stat_monitor);
4838 
4839 		if (PMSG_IS_AUTO(message)) {
4840 			ret = lan78xx_set_auto_suspend(dev);
4841 			if (ret < 0)
4842 				goto out;
4843 		} else {
4844 			struct lan78xx_priv *pdata;
4845 
4846 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4847 			netif_carrier_off(dev->net);
4848 			ret = lan78xx_set_suspend(dev, pdata->wol);
4849 			if (ret < 0)
4850 				goto out;
4851 		}
4852 	} else {
4853 		/* Interface is down; don't allow WOL and PHY
4854 		 * events to wake up the host
4855 		 */
4856 		u32 buf;
4857 
4858 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4859 
4860 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4861 		if (ret < 0)
4862 			goto out;
4863 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4864 		if (ret < 0)
4865 			goto out;
4866 
4867 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4868 		if (ret < 0)
4869 			goto out;
4870 
4871 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4872 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4873 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4874 		buf |= PMT_CTL_SUS_MODE_3_;
4875 
4876 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4877 		if (ret < 0)
4878 			goto out;
4879 
4880 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4881 		if (ret < 0)
4882 			goto out;
4883 
4884 		buf |= PMT_CTL_WUPS_MASK_;
4885 
4886 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4887 		if (ret < 0)
4888 			goto out;
4889 	}
4890 
4891 	ret = 0;
4892 out:
4893 	mutex_unlock(&dev->dev_mutex);
4894 
4895 	return ret;
4896 }
4897 
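/* Resubmit Tx URBs that were anchored on dev->deferred while the
 * device slept. Called from lan78xx_resume() with dev->txq.lock held.
 * Once the bulk-out pipe reports -EPIPE the remaining URBs are
 * dropped, and the caller schedules EVENT_TX_HALT recovery.
 */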
4898 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4899 {
4900 	bool pipe_halted = false;
4901 	struct urb *urb;
4902 
4903 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4904 		struct sk_buff *skb = urb->context;
4905 		int ret;
4906 
4907 		if (!netif_device_present(dev->net) ||
4908 		    !netif_carrier_ok(dev->net) ||
4909 		    pipe_halted) {
4910 			lan78xx_release_tx_buf(dev, skb);
4911 			continue;
4912 		}
4913 
4914 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4915 
4916 		if (ret == 0) {
4917 			netif_trans_update(dev->net);
4918 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4919 		} else {
4920 			if (ret == -EPIPE) {
4921 				netif_stop_queue(dev->net);
4922 				pipe_halted = true;
4923 			} else if (ret == -ENODEV) {
4924 				netif_device_detach(dev->net);
4925 			}
4926 
4927 			lan78xx_release_tx_buf(dev, skb);
4928 		}
4929 	}
4930 
4931 	return pipe_halted;
4932 }
4933 
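/* Resume entry point: flush the Tx FIFO, restart the interrupt URB,
 * resubmit URBs deferred during suspend, restart the Tx path and NAPI,
 * then clear the wakeup-enable registers and acknowledge any recorded
 * wakeup reasons (the *_RCD_/*_FR_ status bits appear to be
 * write-one-to-clear).
 */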
4934 static int lan78xx_resume(struct usb_interface *intf)
4935 {
4936 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4937 	bool dev_open;
4938 	int ret;
4939 
4940 	mutex_lock(&dev->dev_mutex);
4941 
4942 	netif_dbg(dev, ifup, dev->net, "resuming device\n");
4943 
4944 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4945 
4946 	if (dev_open) {
4947 		bool pipe_halted = false;
4948 
4949 		ret = lan78xx_flush_tx_fifo(dev);
4950 		if (ret < 0)
4951 			goto out;
4952 
4953 		if (dev->urb_intr) {
4954 			int err = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4955 
4956 			if (err < 0) {
4957 				if (err == -ENODEV)
4958 					netif_device_detach(dev->net);
4959 				netdev_warn(dev->net, "Failed to submit intr URB\n");
4960 			}
4961 		}
4962 
4963 		spin_lock_irq(&dev->txq.lock);
4964 
4965 		if (netif_device_present(dev->net)) {
4966 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4967 
4968 			if (pipe_halted)
4969 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4970 		}
4971 
4972 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4973 
4974 		spin_unlock_irq(&dev->txq.lock);
4975 
4976 		if (!pipe_halted &&
4977 		    netif_device_present(dev->net) &&
4978 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4979 			netif_start_queue(dev->net);
4980 
4981 		ret = lan78xx_start_tx_path(dev);
4982 		if (ret < 0)
4983 			goto out;
4984 
4985 		napi_schedule(&dev->napi);
4986 
4987 		if (!timer_pending(&dev->stat_monitor)) {
4988 			dev->delta = 1;
4989 			mod_timer(&dev->stat_monitor,
4990 				  jiffies + STAT_UPDATE_TIMER);
4991 		}
4992 
4993 	} else {
4994 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4995 	}
4996 
4997 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4998 	if (ret < 0)
4999 		goto out;
5000 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5001 	if (ret < 0)
5002 		goto out;
5003 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5004 	if (ret < 0)
5005 		goto out;
5006 
5007 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5008 					     WUCSR2_ARP_RCD_ |
5009 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5010 					     WUCSR2_IPV4_TCPSYN_RCD_);
5011 	if (ret < 0)
5012 		goto out;
5013 
5014 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5015 					    WUCSR_EEE_RX_WAKE_ |
5016 					    WUCSR_PFDA_FR_ |
5017 					    WUCSR_RFE_WAKE_FR_ |
5018 					    WUCSR_WUFR_ |
5019 					    WUCSR_MPR_ |
5020 					    WUCSR_BCST_FR_);
5021 	if (ret < 0)
5022 		goto out;
5023 
5024 	ret = 0;
5025 out:
5026 	mutex_unlock(&dev->dev_mutex);
5027 
5028 	return ret;
5029 }
5030 
5031 static int lan78xx_reset_resume(struct usb_interface *intf)
5032 {
5033 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5034 	int ret;
5035 
5036 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device\n");
5037 
5038 	ret = lan78xx_reset(dev);
5039 	if (ret < 0)
5040 		return ret;
5041 
5042 	phy_start(dev->net->phydev);
5043 
5044 	ret = lan78xx_resume(intf);
5045 
5046 	return ret;
5047 }
5048 
5049 static const struct usb_device_id products[] = {
5050 	{
5051 	/* LAN7800 USB Gigabit Ethernet Device */
5052 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5053 	},
5054 	{
5055 	/* LAN7850 USB Gigabit Ethernet Device */
5056 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5057 	},
5058 	{
5059 	/* LAN7801 USB Gigabit Ethernet Device */
5060 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5061 	},
5062 	{
5063 	/* AT29M2-AF USB Gigabit Ethernet Device */
5064 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5065 	},
5066 	{},
5067 };
5068 MODULE_DEVICE_TABLE(usb, products);
5069 
5070 static struct usb_driver lan78xx_driver = {
5071 	.name			= DRIVER_NAME,
5072 	.id_table		= products,
5073 	.probe			= lan78xx_probe,
5074 	.disconnect		= lan78xx_disconnect,
5075 	.suspend		= lan78xx_suspend,
5076 	.resume			= lan78xx_resume,
5077 	.reset_resume		= lan78xx_reset_resume,
5078 	.supports_autosuspend	= 1,
5079 	.disable_hub_initiated_lpm = 1,
5080 };
5081 
5082 module_usb_driver(lan78xx_driver);
5083 
5084 MODULE_AUTHOR(DRIVER_AUTHOR);
5085 MODULE_DESCRIPTION(DRIVER_DESC);
5086 MODULE_LICENSE("GPL");
5087