xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision 8ede5890)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define LAN78XX_NAPI_WEIGHT		64
96 
97 #define TX_URB_NUM			10
98 #define TX_SS_URB_NUM			TX_URB_NUM
99 #define TX_HS_URB_NUM			TX_URB_NUM
100 #define TX_FS_URB_NUM			TX_URB_NUM
101 
102 /* A single URB buffer must be large enough to hold a complete jumbo packet
103  */
104 #define TX_SS_URB_SIZE			(32 * 1024)
105 #define TX_HS_URB_SIZE			(16 * 1024)
106 #define TX_FS_URB_SIZE			(10 * 1024)
107 
108 #define RX_SS_URB_NUM			30
109 #define RX_HS_URB_NUM			10
110 #define RX_FS_URB_NUM			10
111 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
112 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
113 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
114 
115 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
116 #define SS_BULK_IN_DELAY		0x2000
117 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
118 #define HS_BULK_IN_DELAY		0x2000
119 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
120 #define FS_BULK_IN_DELAY		0x2000
121 
122 #define TX_CMD_LEN			8
123 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
124 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
125 
126 #define RX_CMD_LEN			10
127 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
128 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
129 
130 /* USB related defines */
131 #define BULK_IN_PIPE			1
132 #define BULK_OUT_PIPE			2
133 
134 /* default autosuspend delay (mSec)*/
135 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
136 
137 /* statistic update interval (mSec) */
138 #define STAT_UPDATE_TIMER		(1 * 1000)
139 
140 /* time to wait for MAC or FCT to stop (jiffies) */
141 #define HW_DISABLE_TIMEOUT		(HZ / 10)
142 
143 /* time to wait between polling MAC or FCT state (ms) */
144 #define HW_DISABLE_DELAY_MS		1
145 
146 /* defines interrupts from interrupt EP */
147 #define MAX_INT_EP			(32)
148 #define INT_EP_INTEP			(31)
149 #define INT_EP_OTP_WR_DONE		(28)
150 #define INT_EP_EEE_TX_LPI_START		(26)
151 #define INT_EP_EEE_TX_LPI_STOP		(25)
152 #define INT_EP_EEE_RX_LPI		(24)
153 #define INT_EP_MAC_RESET_TIMEOUT	(23)
154 #define INT_EP_RDFO			(22)
155 #define INT_EP_TXE			(21)
156 #define INT_EP_USB_STATUS		(20)
157 #define INT_EP_TX_DIS			(19)
158 #define INT_EP_RX_DIS			(18)
159 #define INT_EP_PHY			(17)
160 #define INT_EP_DP			(16)
161 #define INT_EP_MAC_ERR			(15)
162 #define INT_EP_TDFU			(14)
163 #define INT_EP_TDFO			(13)
164 #define INT_EP_UTX			(12)
165 #define INT_EP_GPIO_11			(11)
166 #define INT_EP_GPIO_10			(10)
167 #define INT_EP_GPIO_9			(9)
168 #define INT_EP_GPIO_8			(8)
169 #define INT_EP_GPIO_7			(7)
170 #define INT_EP_GPIO_6			(6)
171 #define INT_EP_GPIO_5			(5)
172 #define INT_EP_GPIO_4			(4)
173 #define INT_EP_GPIO_3			(3)
174 #define INT_EP_GPIO_2			(2)
175 #define INT_EP_GPIO_1			(1)
176 #define INT_EP_GPIO_0			(0)
177 
/* ethtool statistics names.
 *
 * NOTE(review): the entry order appears to mirror the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below — keep all three
 * in sync when adding or reordering counters.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
227 
/* Snapshot of the device's 32-bit hardware statistics counters, filled
 * by lan78xx_read_stats() via the USB_VENDOR_REQUEST_GET_STATS vendor
 * request.  The field order must stay index-compatible with
 * lan78xx_statstage64: lan78xx_update_stats() walks both structs as
 * flat u32/u64 arrays.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
277 
/* 64-bit running statistics totals (hardware value plus accumulated
 * 32-bit rollovers), rebuilt by lan78xx_update_stats().  Field order
 * must match struct lan78xx_statstage exactly — both are accessed as
 * flat arrays by index.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
327 
/* Device register addresses captured for diagnostic register dumps.
 * NOTE(review): the consumer (presumably ethtool get_regs) is outside
 * this chunk — confirm before changing the set or its order.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
349 
350 #define PHY_REG_SIZE (32 * sizeof(u32))
351 
352 struct lan78xx_net;
353 
/* Driver-private receive-filter/VLAN/WoL state.
 * NOTE(review): presumably reached through lan78xx_net::driver_priv —
 * the assignment happens outside this chunk; confirm.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
366 
/* Lifecycle states of in-flight TX/RX buffers; stored in
 * skb_data::state (which lives in skb->cb, see struct skb_data below).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
376 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB attached to this buffer (see lan78xx_alloc_buf_pool()) */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
384 
/* Context carried by asynchronous USB control transfers.
 * NOTE(review): the users of this struct are outside this chunk.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
389 
390 #define EVENT_TX_HALT			0
391 #define EVENT_RX_HALT			1
392 #define EVENT_RX_MEMORY			2
393 #define EVENT_STS_SPLIT			3
394 #define EVENT_LINK_RESET		4
395 #define EVENT_RX_PAUSED			5
396 #define EVENT_DEV_WAKING		6
397 #define EVENT_DEV_ASLEEP		7
398 #define EVENT_DEV_OPEN			8
399 #define EVENT_STAT_UPDATE		9
400 #define EVENT_DEV_DISCONNECT		10
401 
/* Aggregated statistics state: the last raw hardware snapshot (saved),
 * how many times each 32-bit hardware counter wrapped (rollover_count),
 * the per-counter wrap limit (rollover_max), and the accumulated 64-bit
 * totals (curr_stat).  See lan78xx_update_stats().
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
409 
/* State for the driver's PHY interrupt domain.
 * NOTE(review): domain setup/teardown is outside this chunk.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};
418 
/* Per-device driver state, one instance per bound USB interface. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;	/* pool of unused RX URB buffers */
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;	/* pool of unused TX URB buffers */
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
481 
482 /* define external phy id */
483 #define	PHY_LAN8835			(0x0007C130)
484 #define	PHY_KSZ9031RNX			(0x00221620)
485 
/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1: keep driver default; consumer is outside this chunk */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
490 
/* Take a buffer from @buf_pool; returns NULL if the pool is exhausted.
 *
 * skb_dequeue() already returns NULL for an empty queue and does its
 * check under the queue lock, so the former lockless skb_queue_empty()
 * pre-check was redundant (and racy, although harmlessly so).
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
498 
/* Reset @buf to an empty, reusable state (data pointer back to head,
 * zero length) and return it to @buf_pool.
 */
static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}
510 
511 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
512 {
513 	struct skb_data *entry;
514 	struct sk_buff *buf;
515 
516 	while (!skb_queue_empty(buf_pool)) {
517 		buf = skb_dequeue(buf_pool);
518 		if (buf) {
519 			entry = (struct skb_data *)buf->cb;
520 			usb_free_urb(entry->urb);
521 			dev_kfree_skb_any(buf);
522 		}
523 	}
524 }
525 
/* Pre-allocate @n_urbs buffers of @urb_size bytes for @buf_pool,
 * attaching a fresh URB to each via the skb_data kept in skb->cb.
 * Returns 0 on success or -ENOMEM; any partial allocation is rolled
 * back before returning.
 */
static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		/* NOTE(review): a freshly allocated skb is already linear,
		 * so this looks like a defensive no-op — confirm.
		 */
		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	/* frees everything queued so far, including attached URBs */
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}
569 
/* Take a free RX URB buffer from the pool; NULL if none available. */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
574 
/* Return @rx_buf, reset to empty, to the free RX buffer pool. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
580 
/* Free all RX URB buffers and their URBs. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
585 
/* Allocate the RX URB buffer pool sized by n_rx_urbs/rx_urb_size.
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
591 
/* Take a free TX URB buffer from the pool; NULL if none available. */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
596 
/* Return @tx_buf, reset to empty, to the free TX buffer pool. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
602 
/* Free all TX URB buffers and their URBs. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
607 
/* Allocate the TX URB buffer pool sized by n_tx_urbs/tx_urb_size.
 * Returns 0 or -ENOMEM.
 */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
613 
614 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
615 {
616 	u32 *buf;
617 	int ret;
618 
619 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
620 		return -ENODEV;
621 
622 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
623 	if (!buf)
624 		return -ENOMEM;
625 
626 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
627 			      USB_VENDOR_REQUEST_READ_REGISTER,
628 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
629 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
630 	if (likely(ret >= 0)) {
631 		le32_to_cpus(buf);
632 		*data = *buf;
633 	} else if (net_ratelimit()) {
634 		netdev_warn(dev->net,
635 			    "Failed to read register index 0x%08x. ret = %d",
636 			    index, ret);
637 	}
638 
639 	kfree(buf);
640 
641 	return ret;
642 }
643 
644 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
645 {
646 	u32 *buf;
647 	int ret;
648 
649 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
650 		return -ENODEV;
651 
652 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
653 	if (!buf)
654 		return -ENOMEM;
655 
656 	*buf = data;
657 	cpu_to_le32s(buf);
658 
659 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
660 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
661 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
662 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
663 	if (unlikely(ret < 0) &&
664 	    net_ratelimit()) {
665 		netdev_warn(dev->net,
666 			    "Failed to write register index 0x%08x. ret = %d",
667 			    index, ret);
668 	}
669 
670 	kfree(buf);
671 
672 	return ret;
673 }
674 
675 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
676 			      u32 data)
677 {
678 	int ret;
679 	u32 buf;
680 
681 	ret = lan78xx_read_reg(dev, reg, &buf);
682 	if (ret < 0)
683 		return ret;
684 
685 	buf &= ~mask;
686 	buf |= (mask & data);
687 
688 	ret = lan78xx_write_reg(dev, reg, buf);
689 	if (ret < 0)
690 		return ret;
691 
692 	return 0;
693 }
694 
695 static int lan78xx_read_stats(struct lan78xx_net *dev,
696 			      struct lan78xx_statstage *data)
697 {
698 	int ret = 0;
699 	int i;
700 	struct lan78xx_statstage *stats;
701 	u32 *src;
702 	u32 *dst;
703 
704 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
705 	if (!stats)
706 		return -ENOMEM;
707 
708 	ret = usb_control_msg(dev->udev,
709 			      usb_rcvctrlpipe(dev->udev, 0),
710 			      USB_VENDOR_REQUEST_GET_STATS,
711 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
712 			      0,
713 			      0,
714 			      (void *)stats,
715 			      sizeof(*stats),
716 			      USB_CTRL_SET_TIMEOUT);
717 	if (likely(ret >= 0)) {
718 		src = (u32 *)stats;
719 		dst = (u32 *)data;
720 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
721 			le32_to_cpus(&src[i]);
722 			dst[i] = src[i];
723 		}
724 	} else {
725 		netdev_warn(dev->net,
726 			    "Failed to read stat ret = %d", ret);
727 	}
728 
729 	kfree(stats);
730 
731 	return ret;
732 }
733 
/* Detect 32-bit hardware counter wrap-around: if the freshly read value
 * is smaller than the previously saved one, the counter rolled over, so
 * bump the matching rollover count.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

/* Update every per-counter rollover count from the new snapshot @stats,
 * then remember @stats as the reference for the next comparison.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
793 
794 static void lan78xx_update_stats(struct lan78xx_net *dev)
795 {
796 	u32 *p, *count, *max;
797 	u64 *data;
798 	int i;
799 	struct lan78xx_statstage lan78xx_stats;
800 
801 	if (usb_autopm_get_interface(dev->intf) < 0)
802 		return;
803 
804 	p = (u32 *)&lan78xx_stats;
805 	count = (u32 *)&dev->stats.rollover_count;
806 	max = (u32 *)&dev->stats.rollover_max;
807 	data = (u64 *)&dev->stats.curr_stat;
808 
809 	mutex_lock(&dev->stats.access_lock);
810 
811 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
812 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
813 
814 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
815 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
816 
817 	mutex_unlock(&dev->stats.access_lock);
818 
819 	usb_autopm_put_interface(dev->intf);
820 }
821 
822 /* Loop until the read is completed with timeout called with phy_mutex held */
823 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
824 {
825 	unsigned long start_time = jiffies;
826 	u32 val;
827 	int ret;
828 
829 	do {
830 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
831 		if (unlikely(ret < 0))
832 			return -EIO;
833 
834 		if (!(val & MII_ACC_MII_BUSY_))
835 			return 0;
836 	} while (!time_after(jiffies, start_time + HZ));
837 
838 	return -EIO;
839 }
840 
841 static inline u32 mii_access(int id, int index, int read)
842 {
843 	u32 ret;
844 
845 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
846 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
847 	if (read)
848 		ret |= MII_ACC_MII_READ_;
849 	else
850 		ret |= MII_ACC_MII_WRITE_;
851 	ret |= MII_ACC_MII_BUSY_;
852 
853 	return ret;
854 }
855 
/* Poll E2P_CMD until the pending EEPROM command finishes, the
 * controller flags its own timeout, or roughly one second elapses.
 * Returns 0 when the command completed, -EIO on register read failure
 * or timeout.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* re-check the last value read: still busy (host-side timeout) or
	 * a device-reported timeout both count as failure
	 */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
880 
881 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
882 {
883 	unsigned long start_time = jiffies;
884 	u32 val;
885 	int ret;
886 
887 	do {
888 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
889 		if (unlikely(ret < 0))
890 			return -EIO;
891 
892 		if (!(val & E2P_CMD_EPC_BUSY_))
893 			return 0;
894 
895 		usleep_range(40, 100);
896 	} while (!time_after(jiffies, start_time + HZ));
897 
898 	netdev_warn(dev->net, "EEPROM is busy");
899 	return -EIO;
900 }
901 
902 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
903 				   u32 length, u8 *data)
904 {
905 	u32 val;
906 	u32 saved;
907 	int i, ret;
908 	int retval;
909 
910 	/* depends on chip, some EEPROM pins are muxed with LED function.
911 	 * disable & restore LED function to access EEPROM.
912 	 */
913 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
914 	saved = val;
915 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
916 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
917 		ret = lan78xx_write_reg(dev, HW_CFG, val);
918 	}
919 
920 	retval = lan78xx_eeprom_confirm_not_busy(dev);
921 	if (retval)
922 		return retval;
923 
924 	for (i = 0; i < length; i++) {
925 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
926 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
927 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
928 		if (unlikely(ret < 0)) {
929 			retval = -EIO;
930 			goto exit;
931 		}
932 
933 		retval = lan78xx_wait_eeprom(dev);
934 		if (retval < 0)
935 			goto exit;
936 
937 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
938 		if (unlikely(ret < 0)) {
939 			retval = -EIO;
940 			goto exit;
941 		}
942 
943 		data[i] = val & 0xFF;
944 		offset++;
945 	}
946 
947 	retval = 0;
948 exit:
949 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
950 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
951 
952 	return retval;
953 }
954 
955 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
956 			       u32 length, u8 *data)
957 {
958 	u8 sig;
959 	int ret;
960 
961 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
962 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
963 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
964 	else
965 		ret = -EINVAL;
966 
967 	return ret;
968 }
969 
/* Write @length bytes from @data to EEPROM starting at @offset: issue
 * the write/erase-enable command, then one byte-write command per byte.
 * LED pin muxing is disabled on LAN7800 for the duration and restored
 * on every exit path.  Returns 0 on success or a negative error code.
 *
 * NOTE(review): the result of the initial HW_CFG read is not checked;
 * on failure 'saved' could hold garbage that is written back at exit.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
1036 
/* Read @length bytes of raw OTP memory starting at @offset.  Powers the
 * OTP block up first if it is in power-down, then fetches one byte per
 * iteration through the OTP_ADDR1/2 + OTP_FUNC_CMD + OTP_CMD_GO
 * handshake.  Returns 0 on success or -EIO on a polling timeout.
 *
 * NOTE(review): register accessor return codes are ignored throughout —
 * a failed read leaves 'buf' stale in the polling loops.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across the two address registers */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1089 
/* Program @length bytes from @data into OTP memory starting at @offset.
 * Powers the OTP block up if needed, switches it to BYTE program mode,
 * then programs one byte per iteration (OTP_TST_CMD_PRGVRFY_ — program
 * with verify, per the register name; confirm against the datasheet).
 * Returns 0 on success or -EIO on a polling timeout.
 *
 * NOTE(review): register accessor return codes are ignored throughout,
 * matching lan78xx_read_raw_otp().
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address is split across the two address registers */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1141 
1142 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1143 			    u32 length, u8 *data)
1144 {
1145 	u8 sig;
1146 	int ret;
1147 
1148 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1149 
1150 	if (ret == 0) {
1151 		if (sig == OTP_INDICATOR_2)
1152 			offset += 0x100;
1153 		else if (sig != OTP_INDICATOR_1)
1154 			ret = -EINVAL;
1155 		if (!ret)
1156 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1157 	}
1158 
1159 	return ret;
1160 }
1161 
1162 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1163 {
1164 	int i, ret;
1165 
1166 	for (i = 0; i < 100; i++) {
1167 		u32 dp_sel;
1168 
1169 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1170 		if (unlikely(ret < 0))
1171 			return -EIO;
1172 
1173 		if (dp_sel & DP_SEL_DPRDY_)
1174 			return 0;
1175 
1176 		usleep_range(40, 100);
1177 	}
1178 
1179 	netdev_warn(dev->net, "%s timed out", __func__);
1180 
1181 	return -EIO;
1182 }
1183 
/* Write @length 32-bit words from @buf into the internal RAM selected by
 * @ram_select, starting at word address @addr.
 *
 * Serialized by pdata->dataport_mutex; each word write is a
 * DP_ADDR/DP_DATA/DP_CMD sequence followed by a busy-wait on DP_SEL.
 *
 * NOTE(review): an autopm-resume failure returns 0 (reported as success)
 * and intermediate register-write errors are not checked — confirm this
 * best-effort behavior is intended.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Select the target RAM bank, preserving the other DP_SEL bits. */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1224 
1225 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1226 				    int index, u8 addr[ETH_ALEN])
1227 {
1228 	u32 temp;
1229 
1230 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1231 		temp = addr[3];
1232 		temp = addr[2] | (temp << 8);
1233 		temp = addr[1] | (temp << 8);
1234 		temp = addr[0] | (temp << 8);
1235 		pdata->pfilter_table[index][1] = temp;
1236 		temp = addr[5];
1237 		temp = addr[4] | (temp << 8);
1238 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1239 		pdata->pfilter_table[index][0] = temp;
1240 	}
1241 }
1242 
1243 /* returns hash bit number for given MAC address */
1244 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1245 {
1246 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1247 }
1248 
/* Work handler: flush the shadow multicast state (hash table, perfect
 * filters and RFE_CTL) to the hardware.
 *
 * Runs in process context because the register/dataport writes sleep;
 * scheduled from lan78xx_set_multicast(), which only updates the shadow
 * copies under its spinlock.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* Upload the multicast hash table into the VLAN/DA filter RAM. */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* Invalidate the slot before rewriting LO then HI. */
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1272 
1273 static void lan78xx_set_multicast(struct net_device *netdev)
1274 {
1275 	struct lan78xx_net *dev = netdev_priv(netdev);
1276 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1277 	unsigned long flags;
1278 	int i;
1279 
1280 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1281 
1282 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1283 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1284 
1285 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1286 		pdata->mchash_table[i] = 0;
1287 
1288 	/* pfilter_table[0] has own HW address */
1289 	for (i = 1; i < NUM_OF_MAF; i++) {
1290 		pdata->pfilter_table[i][0] = 0;
1291 		pdata->pfilter_table[i][1] = 0;
1292 	}
1293 
1294 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1295 
1296 	if (dev->net->flags & IFF_PROMISC) {
1297 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1298 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1299 	} else {
1300 		if (dev->net->flags & IFF_ALLMULTI) {
1301 			netif_dbg(dev, drv, dev->net,
1302 				  "receive all multicast enabled");
1303 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1304 		}
1305 	}
1306 
1307 	if (netdev_mc_count(dev->net)) {
1308 		struct netdev_hw_addr *ha;
1309 		int i;
1310 
1311 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1312 
1313 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1314 
1315 		i = 1;
1316 		netdev_for_each_mc_addr(ha, netdev) {
1317 			/* set first 32 into Perfect Filter */
1318 			if (i < 33) {
1319 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1320 			} else {
1321 				u32 bitnum = lan78xx_hash(ha->addr);
1322 
1323 				pdata->mchash_table[bitnum / 32] |=
1324 							(1 << (bitnum % 32));
1325 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1326 			}
1327 			i++;
1328 		}
1329 	}
1330 
1331 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1332 
1333 	/* defer register writes to a sleepable context */
1334 	schedule_work(&pdata->set_multicast);
1335 }
1336 
/* Program MAC flow-control after link-up, resolving pause capabilities
 * from autonegotiation (@lcladv/@rmtadv) or from the user-forced request.
 *
 * Always returns 0.
 * NOTE(review): register-write errors are ignored here — confirm intended.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		/* 0xFFFF is the maximum pause time sent in pause frames. */
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO on/off thresholds depend on the USB bus speed. */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1370 
1371 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1372 
/* Reset the MAC via MAC_CR_RST_ and wait (up to 1s) for the bit to
 * self-clear.
 *
 * Holds phy_mutex for the whole operation and first waits for any MDIO
 * transaction to finish, because resetting during MDIO activity can hang
 * the MAC register interface.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears, or a
 * negative register-access error.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1418 
/* React to a PHY link-state change (called from the deferred-work path
 * after a PHY interrupt).
 *
 * On link-down: reset the MAC and stop the statistics timer.
 * On link-up: tune USB U1/U2 power states (SuperSpeed only), program
 * flow control from the negotiated advertisements, restart the stats
 * timer and kick the RX path.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* Sample link state under the PHY lock to get a consistent view. */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* Local and link-partner advertisements for pause resolve. */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Refill RX URBs and let NAPI start draining them. */
		lan78xx_rx_urb_submit_all(dev);

		napi_schedule(&dev->napi);
	}

	return 0;
}
1511 
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.	hope the failure is rare.
 */
/* Mark event bit @work in dev->flags and kick the deferred-work handler.
 * If the work item was already queued, the bit is still set, so the
 * pending run will pick the event up; the error message is informational.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1523 
/* Interrupt-endpoint URB completion: decode the 4-byte status word.
 *
 * A PHY interrupt defers a link reset to keventd and, if an IRQ domain
 * mapping exists, re-injects the event as a nested interrupt for the
 * PHY driver. Any other status bits are only logged.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* Device reports status little-endian; buffer may be unaligned. */
	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			/* generic_handle_irq() expects IRQs disabled. */
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}
1550 
/* ethtool .get_eeprom_len: report the fixed maximum EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1555 
1556 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1557 				      struct ethtool_eeprom *ee, u8 *data)
1558 {
1559 	struct lan78xx_net *dev = netdev_priv(netdev);
1560 	int ret;
1561 
1562 	ret = usb_autopm_get_interface(dev->intf);
1563 	if (ret)
1564 		return ret;
1565 
1566 	ee->magic = LAN78XX_EEPROM_MAGIC;
1567 
1568 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1569 
1570 	usb_autopm_put_interface(dev->intf);
1571 
1572 	return ret;
1573 }
1574 
1575 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1576 				      struct ethtool_eeprom *ee, u8 *data)
1577 {
1578 	struct lan78xx_net *dev = netdev_priv(netdev);
1579 	int ret;
1580 
1581 	ret = usb_autopm_get_interface(dev->intf);
1582 	if (ret)
1583 		return ret;
1584 
1585 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1586 	 * to load data from EEPROM
1587 	 */
1588 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1589 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1590 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1591 		 (ee->offset == 0) &&
1592 		 (ee->len == 512) &&
1593 		 (data[0] == OTP_INDICATOR_1))
1594 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1595 
1596 	usb_autopm_put_interface(dev->intf);
1597 
1598 	return ret;
1599 }
1600 
/* ethtool .get_strings: copy out the statistics name table. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1607 
1608 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1609 {
1610 	if (sset == ETH_SS_STATS)
1611 		return ARRAY_SIZE(lan78xx_gstrings);
1612 	else
1613 		return -EOPNOTSUPP;
1614 }
1615 
/* ethtool .get_ethtool_stats: refresh hardware counters, then copy the
 * current snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1627 
/* ethtool .get_wol: report Wake-on-LAN capability and current settings.
 *
 * WoL is only advertised when USB remote wakeup (USB_CFG_RMT_WKP_) is
 * enabled in USB_CFG0; otherwise supported/wolopts read as 0.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1655 
1656 static int lan78xx_set_wol(struct net_device *netdev,
1657 			   struct ethtool_wolinfo *wol)
1658 {
1659 	struct lan78xx_net *dev = netdev_priv(netdev);
1660 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1661 	int ret;
1662 
1663 	ret = usb_autopm_get_interface(dev->intf);
1664 	if (ret < 0)
1665 		return ret;
1666 
1667 	if (wol->wolopts & ~WAKE_ALL)
1668 		return -EINVAL;
1669 
1670 	pdata->wol = wol->wolopts;
1671 
1672 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1673 
1674 	phy_ethtool_set_wol(netdev->phydev, wol);
1675 
1676 	usb_autopm_put_interface(dev->intf);
1677 
1678 	return ret;
1679 }
1680 
/* ethtool .get_eee: combine PHY EEE state with the MAC's EEE enable bit
 * and LPI request delay.
 *
 * Returns 0 on success or a negative error code.
 * NOTE(review): the MAC_CR and EEE_TX_LPI_REQ_DLY read results are used
 * without checking the register-access return value — confirm intended.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* Active only if both sides advertise a common EEE mode. */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1718 
1719 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1720 {
1721 	struct lan78xx_net *dev = netdev_priv(net);
1722 	int ret;
1723 	u32 buf;
1724 
1725 	ret = usb_autopm_get_interface(dev->intf);
1726 	if (ret < 0)
1727 		return ret;
1728 
1729 	if (edata->eee_enabled) {
1730 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1731 		buf |= MAC_CR_EEE_EN_;
1732 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1733 
1734 		phy_ethtool_set_eee(net->phydev, edata);
1735 
1736 		buf = (u32)edata->tx_lpi_timer;
1737 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1738 	} else {
1739 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1740 		buf &= ~MAC_CR_EEE_EN_;
1741 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1742 	}
1743 
1744 	usb_autopm_put_interface(dev->intf);
1745 
1746 	return 0;
1747 }
1748 
1749 static u32 lan78xx_get_link(struct net_device *net)
1750 {
1751 	u32 link;
1752 
1753 	mutex_lock(&net->phydev->lock);
1754 	phy_read_status(net->phydev);
1755 	link = net->phydev->link;
1756 	mutex_unlock(&net->phydev->lock);
1757 
1758 	return link;
1759 }
1760 
1761 static void lan78xx_get_drvinfo(struct net_device *net,
1762 				struct ethtool_drvinfo *info)
1763 {
1764 	struct lan78xx_net *dev = netdev_priv(net);
1765 
1766 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1767 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1768 }
1769 
/* ethtool .get_msglevel: return the driver's message-enable bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1776 
/* ethtool .set_msglevel: set the driver's message-enable bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1783 
1784 static int lan78xx_get_link_ksettings(struct net_device *net,
1785 				      struct ethtool_link_ksettings *cmd)
1786 {
1787 	struct lan78xx_net *dev = netdev_priv(net);
1788 	struct phy_device *phydev = net->phydev;
1789 	int ret;
1790 
1791 	ret = usb_autopm_get_interface(dev->intf);
1792 	if (ret < 0)
1793 		return ret;
1794 
1795 	phy_ethtool_ksettings_get(phydev, cmd);
1796 
1797 	usb_autopm_put_interface(dev->intf);
1798 
1799 	return ret;
1800 }
1801 
/* ethtool .set_link_ksettings: apply speed/duplex via phylib; when
 * autoneg is off, bounce the link so the forced settings take effect.
 *
 * Returns the phy_ethtool_ksettings_set() result (or an autopm error).
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		/* Momentarily setting BMCR loopback drops the link so the
		 * partner renegotiates against the new forced settings.
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1829 
/* ethtool .get_pauseparam: report the requested flow-control settings
 * and whether pause is autonegotiated.
 *
 * NOTE(review): @ecmd is filled in but never used here — looks like a
 * leftover; verify before removing.
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1847 
/* ethtool .set_pauseparam: record the requested RX/TX pause settings.
 *
 * Pause autoneg can only be requested when link autoneg is enabled.
 * When autonegotiating, the Pause/Asym-Pause advertisement bits are
 * rebuilt from the request and pushed to the PHY; otherwise the request
 * takes effect at the next link-up via lan78xx_update_flowcontrol().
 *
 * Returns 0 on success or -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* Clear the old pause bits, then OR in the new request. */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1892 
1893 static int lan78xx_get_regs_len(struct net_device *netdev)
1894 {
1895 	if (!netdev->phydev)
1896 		return (sizeof(lan78xx_regs));
1897 	else
1898 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1899 }
1900 
/* ethtool .get_regs: dump the MAC registers listed in lan78xx_regs[],
 * followed by PHY registers 0-31 when a PHY is attached.
 *
 * The caller sizes @buf via lan78xx_get_regs_len(), so the layout here
 * must stay in sync with that function.
 * NOTE(review): unlike the other ethtool ops, this path does not take an
 * autopm reference before touching registers — confirm the core resumes
 * the device first.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1920 
/* ethtool operations exported for lan78xx network devices. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1945 
/* Establish the device MAC address at probe time.
 *
 * Precedence: value already in RX_ADDRL/RX_ADDRH (e.g. set by firmware),
 * then platform/device-tree data, then EEPROM/OTP, then a random address.
 * The result is written back to the RX address registers when needed,
 * mirrored into perfect-filter slot 0, and published to the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* Unpack the little-endian register pair into a byte array. */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* Perfect-filter slot 0 always matches our own address. */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
}
1994 
/* MDIO read and write wrappers for phylib */
/* mii_bus .read: read PHY register @idx of @phy_id via the MII_ACC /
 * MII_DATA register pair, serialized by phy_mutex.
 *
 * Returns the 16-bit register value, or a negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* Success: hand back the 16-bit data as the return value. */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2031 
2032 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2033 				 u16 regval)
2034 {
2035 	struct lan78xx_net *dev = bus->priv;
2036 	u32 val, addr;
2037 	int ret;
2038 
2039 	ret = usb_autopm_get_interface(dev->intf);
2040 	if (ret < 0)
2041 		return ret;
2042 
2043 	mutex_lock(&dev->phy_mutex);
2044 
2045 	/* confirm MII not busy */
2046 	ret = lan78xx_phy_wait_not_busy(dev);
2047 	if (ret < 0)
2048 		goto done;
2049 
2050 	val = (u32)regval;
2051 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2052 
2053 	/* set the address, index & direction (write to PHY) */
2054 	addr = mii_access(phy_id, idx, MII_WRITE);
2055 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2056 
2057 	ret = lan78xx_phy_wait_not_busy(dev);
2058 	if (ret < 0)
2059 		goto done;
2060 
2061 done:
2062 	mutex_unlock(&dev->phy_mutex);
2063 	usb_autopm_put_interface(dev->intf);
2064 	return 0;
2065 }
2066 
/* Allocate and register the driver's MDIO bus, optionally bound to an
 * "mdio" device-tree child node.
 *
 * The scan mask depends on the chip: LAN7800/7850 use only the internal
 * PHY at address 1; LAN7801 scans external PHYs at addresses 0-7.
 *
 * Returns 0 on success or a negative error code; on registration failure
 * the allocated bus is freed.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* Unique bus id derived from the USB bus/device numbers. */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2113 
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2119 
/* phylib link-change callback.
 *
 * Applies a hardware workaround for forced 100M mode: the PHY may latch
 * the wrong mode when the cable length changes abruptly, so the speed is
 * dropped to 10 and then raised back to 100, with the PHY interrupt
 * masked around the sequence to suppress the spurious events it causes.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
2151 
/* irq_domain .map: wire a virtual IRQ to this driver's irqchip and
 * handler, and mark it unsuitable for autoprobing.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2163 
/* irq_domain .unmap: detach the irqchip, handler and chip data. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2169 
/* irq_domain callbacks for the device's interrupt-endpoint events. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2174 
/* irqchip .irq_mask: clear the enable bit in the cached mask only; the
 * hardware register is synced in irq_bus_sync_unlock.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2181 
/* irqchip .irq_unmask: set the enable bit in the cached mask only; the
 * hardware register is synced in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2188 
/* irqchip .irq_bus_lock: serialize slow-bus (USB) irq updates. */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2195 
/* irqchip .irq_bus_sync_unlock: push the cached enable mask to the
 * INT_EP_CTL register (only if it changed) and release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic context.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2212 
/* Software irq_chip: mask/unmask only touch the shadow enable word;
 * hardware access is deferred to the bus_lock/bus_sync_unlock pair,
 * which runs in sleepable context (USB register I/O).
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2220 
2221 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2222 {
2223 	struct device_node *of_node;
2224 	struct irq_domain *irqdomain;
2225 	unsigned int irqmap = 0;
2226 	u32 buf;
2227 	int ret = 0;
2228 
2229 	of_node = dev->udev->dev.parent->of_node;
2230 
2231 	mutex_init(&dev->domain_data.irq_lock);
2232 
2233 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2234 	dev->domain_data.irqenable = buf;
2235 
2236 	dev->domain_data.irqchip = &lan78xx_irqchip;
2237 	dev->domain_data.irq_handler = handle_simple_irq;
2238 
2239 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2240 					  &chip_domain_ops, &dev->domain_data);
2241 	if (irqdomain) {
2242 		/* create mapping for PHY interrupt */
2243 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2244 		if (!irqmap) {
2245 			irq_domain_remove(irqdomain);
2246 
2247 			irqdomain = NULL;
2248 			ret = -EINVAL;
2249 		}
2250 	} else {
2251 		ret = -EINVAL;
2252 	}
2253 
2254 	dev->domain_data.irqdomain = irqdomain;
2255 	dev->domain_data.phyirq = irqmap;
2256 
2257 	return ret;
2258 }
2259 
2260 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2261 {
2262 	if (dev->domain_data.phyirq > 0) {
2263 		irq_dispose_mapping(dev->domain_data.phyirq);
2264 
2265 		if (dev->domain_data.irqdomain)
2266 			irq_domain_remove(dev->domain_data.irqdomain);
2267 	}
2268 	dev->domain_data.phyirq = 0;
2269 	dev->domain_data.irqdomain = NULL;
2270 }
2271 
2272 static int lan8835_fixup(struct phy_device *phydev)
2273 {
2274 	int buf;
2275 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2276 
2277 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2278 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2279 	buf &= ~0x1800;
2280 	buf |= 0x0800;
2281 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2282 
2283 	/* RGMII MAC TXC Delay Enable */
2284 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2285 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2286 
2287 	/* RGMII TX DLL Tune Adjust */
2288 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2289 
2290 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2291 
2292 	return 1;
2293 }
2294 
2295 static int ksz9031rnx_fixup(struct phy_device *phydev)
2296 {
2297 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2298 
2299 	/* Micrel9301RNX PHY configuration */
2300 	/* RGMII Control Signal Pad Skew */
2301 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2302 	/* RGMII RX Data Pad Skew */
2303 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2304 	/* RGMII RX Clock Pad Skew */
2305 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2306 
2307 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2308 
2309 	return 1;
2310 }
2311 
2312 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2313 {
2314 	u32 buf;
2315 	int ret;
2316 	struct fixed_phy_status fphy_status = {
2317 		.link = 1,
2318 		.speed = SPEED_1000,
2319 		.duplex = DUPLEX_FULL,
2320 	};
2321 	struct phy_device *phydev;
2322 
2323 	phydev = phy_find_first(dev->mdiobus);
2324 	if (!phydev) {
2325 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2326 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2327 		if (IS_ERR(phydev)) {
2328 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2329 			return NULL;
2330 		}
2331 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2332 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2333 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2334 					MAC_RGMII_ID_TXC_DELAY_EN_);
2335 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2336 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2337 		buf |= HW_CFG_CLK125_EN_;
2338 		buf |= HW_CFG_REFCLK25_EN_;
2339 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2340 	} else {
2341 		if (!phydev->drv) {
2342 			netdev_err(dev->net, "no PHY driver found\n");
2343 			return NULL;
2344 		}
2345 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2346 		/* external PHY fixup for KSZ9031RNX */
2347 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2348 						 ksz9031rnx_fixup);
2349 		if (ret < 0) {
2350 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2351 			return NULL;
2352 		}
2353 		/* external PHY fixup for LAN8835 */
2354 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2355 						 lan8835_fixup);
2356 		if (ret < 0) {
2357 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2358 			return NULL;
2359 		}
2360 		/* add more external PHY fixup here if needed */
2361 
2362 		phydev->is_internal = false;
2363 	}
2364 	return phydev;
2365 }
2366 
/* Locate and attach the PHY for this device.
 *
 * LAN7801 is probed via lan7801_phy_init() (external or fixed PHY);
 * LAN7800/LAN7850 use their internal GMII PHY. After attaching:
 * 1000T-half is removed (unsupported by the MAC), pause advertisement
 * is configured for Rx+Tx flow control, the optional DT property
 * "microchip,led-modes" gates the LED enable bits in HW_CFG, and
 * autonegotiation is (re)started.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* enable LED n iff the property lists more than
			 * n entries (bool-times-bit arithmetic)
			 */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2469 
2470 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2471 {
2472 	u32 buf;
2473 	bool rxenabled;
2474 
2475 	lan78xx_read_reg(dev, MAC_RX, &buf);
2476 
2477 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2478 
2479 	if (rxenabled) {
2480 		buf &= ~MAC_RX_RXEN_;
2481 		lan78xx_write_reg(dev, MAC_RX, buf);
2482 	}
2483 
2484 	/* add 4 to size for FCS */
2485 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2486 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2487 
2488 	lan78xx_write_reg(dev, MAC_RX, buf);
2489 
2490 	if (rxenabled) {
2491 		buf |= MAC_RX_RXEN_;
2492 		lan78xx_write_reg(dev, MAC_RX, buf);
2493 	}
2494 
2495 	return 0;
2496 }
2497 
/* Asynchronously unlink every URB on queue @q that is not already being
 * unlinked. The queue lock must be dropped around usb_unlink_urb(), so
 * after each unlink the walk restarts from the head, skipping entries
 * already marked unlink_start.
 *
 * Returns the number of unlink requests successfully issued.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2542 
2543 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2544 {
2545 	struct lan78xx_net *dev = netdev_priv(netdev);
2546 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2547 	int ret;
2548 
2549 	/* no second zero-length packet read wanted after mtu-sized packets */
2550 	if ((max_frame_len % dev->maxpacket) == 0)
2551 		return -EDOM;
2552 
2553 	ret = usb_autopm_get_interface(dev->intf);
2554 	if (ret < 0)
2555 		return ret;
2556 
2557 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2558 	if (!ret)
2559 		netdev->mtu = new_mtu;
2560 
2561 	usb_autopm_put_interface(dev->intf);
2562 
2563 	return ret;
2564 }
2565 
2566 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2567 {
2568 	struct lan78xx_net *dev = netdev_priv(netdev);
2569 	struct sockaddr *addr = p;
2570 	u32 addr_lo, addr_hi;
2571 
2572 	if (netif_running(netdev))
2573 		return -EBUSY;
2574 
2575 	if (!is_valid_ether_addr(addr->sa_data))
2576 		return -EADDRNOTAVAIL;
2577 
2578 	eth_hw_addr_set(netdev, addr->sa_data);
2579 
2580 	addr_lo = netdev->dev_addr[0] |
2581 		  netdev->dev_addr[1] << 8 |
2582 		  netdev->dev_addr[2] << 16 |
2583 		  netdev->dev_addr[3] << 24;
2584 	addr_hi = netdev->dev_addr[4] |
2585 		  netdev->dev_addr[5] << 8;
2586 
2587 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2588 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2589 
2590 	/* Added to support MAC address changes */
2591 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2592 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2593 
2594 	return 0;
2595 }
2596 
2597 /* Enable or disable Rx checksum offload engine */
2598 static int lan78xx_set_features(struct net_device *netdev,
2599 				netdev_features_t features)
2600 {
2601 	struct lan78xx_net *dev = netdev_priv(netdev);
2602 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2603 	unsigned long flags;
2604 
2605 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2606 
2607 	if (features & NETIF_F_RXCSUM) {
2608 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2609 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2610 	} else {
2611 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2612 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2613 	}
2614 
2615 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2616 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2617 	else
2618 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2619 
2620 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2621 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2622 	else
2623 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2624 
2625 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2626 
2627 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2628 
2629 	return 0;
2630 }
2631 
2632 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2633 {
2634 	struct lan78xx_priv *pdata =
2635 			container_of(param, struct lan78xx_priv, set_vlan);
2636 	struct lan78xx_net *dev = pdata->dev;
2637 
2638 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2639 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2640 }
2641 
2642 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2643 				   __be16 proto, u16 vid)
2644 {
2645 	struct lan78xx_net *dev = netdev_priv(netdev);
2646 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2647 	u16 vid_bit_index;
2648 	u16 vid_dword_index;
2649 
2650 	vid_dword_index = (vid >> 5) & 0x7F;
2651 	vid_bit_index = vid & 0x1F;
2652 
2653 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2654 
2655 	/* defer register writes to a sleepable context */
2656 	schedule_work(&pdata->set_vlan);
2657 
2658 	return 0;
2659 }
2660 
2661 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2662 				    __be16 proto, u16 vid)
2663 {
2664 	struct lan78xx_net *dev = netdev_priv(netdev);
2665 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2666 	u16 vid_bit_index;
2667 	u16 vid_dword_index;
2668 
2669 	vid_dword_index = (vid >> 5) & 0x7F;
2670 	vid_bit_index = vid & 0x1F;
2671 
2672 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2673 
2674 	/* defer register writes to a sleepable context */
2675 	schedule_work(&pdata->set_vlan);
2676 
2677 	return 0;
2678 }
2679 
2680 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2681 {
2682 	int ret;
2683 	u32 buf;
2684 	u32 regs[6] = { 0 };
2685 
2686 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2687 	if (buf & USB_CFG1_LTM_ENABLE_) {
2688 		u8 temp[2];
2689 		/* Get values from EEPROM first */
2690 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2691 			if (temp[0] == 24) {
2692 				ret = lan78xx_read_raw_eeprom(dev,
2693 							      temp[1] * 2,
2694 							      24,
2695 							      (u8 *)regs);
2696 				if (ret < 0)
2697 					return;
2698 			}
2699 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2700 			if (temp[0] == 24) {
2701 				ret = lan78xx_read_raw_otp(dev,
2702 							   temp[1] * 2,
2703 							   24,
2704 							   (u8 *)regs);
2705 				if (ret < 0)
2706 					return;
2707 			}
2708 		}
2709 	}
2710 
2711 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2712 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2713 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2714 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2715 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2716 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2717 }
2718 
2719 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2720 {
2721 	int result = 0;
2722 
2723 	switch (dev->udev->speed) {
2724 	case USB_SPEED_SUPER:
2725 		dev->rx_urb_size = RX_SS_URB_SIZE;
2726 		dev->tx_urb_size = TX_SS_URB_SIZE;
2727 		dev->n_rx_urbs = RX_SS_URB_NUM;
2728 		dev->n_tx_urbs = TX_SS_URB_NUM;
2729 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2730 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2731 		break;
2732 	case USB_SPEED_HIGH:
2733 		dev->rx_urb_size = RX_HS_URB_SIZE;
2734 		dev->tx_urb_size = TX_HS_URB_SIZE;
2735 		dev->n_rx_urbs = RX_HS_URB_NUM;
2736 		dev->n_tx_urbs = TX_HS_URB_NUM;
2737 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2738 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2739 		break;
2740 	case USB_SPEED_FULL:
2741 		dev->rx_urb_size = RX_FS_URB_SIZE;
2742 		dev->tx_urb_size = TX_FS_URB_SIZE;
2743 		dev->n_rx_urbs = RX_FS_URB_NUM;
2744 		dev->n_tx_urbs = TX_FS_URB_NUM;
2745 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2746 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2747 		break;
2748 	default:
2749 		netdev_warn(dev->net, "USB bus speed not supported\n");
2750 		result = -EIO;
2751 		break;
2752 	}
2753 
2754 	return result;
2755 }
2756 
/* Set the enable bit(s) @hw_enable in control register @reg via a
 * read-modify-write; returns the result of the register update.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2761 
/* Clear @hw_enabled in control register @reg, then poll @reg until the
 * hardware reports @hw_disabled or HW_DISABLE_TIMEOUT elapses.
 *
 * Returns 0 once the block is stopped, -ETIME on timeout, or a negative
 * error code from the register access.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* poll for the disabled indication */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2801 
/* Assert the FIFO reset bit @fifo_flush in control register @reg via a
 * read-modify-write. The corresponding data path must already be
 * stopped (see lan78xx_flush_tx_fifo()/lan78xx_flush_rx_fifo()).
 */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2806 
2807 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2808 {
2809 	int ret;
2810 
2811 	netif_dbg(dev, drv, dev->net, "start tx path");
2812 
2813 	/* Start the MAC transmitter */
2814 
2815 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2816 	if (ret < 0)
2817 		return ret;
2818 
2819 	/* Start the Tx FIFO */
2820 
2821 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2822 	if (ret < 0)
2823 		return ret;
2824 
2825 	return 0;
2826 }
2827 
2828 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2829 {
2830 	int ret;
2831 
2832 	netif_dbg(dev, drv, dev->net, "stop tx path");
2833 
2834 	/* Stop the Tx FIFO */
2835 
2836 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2837 	if (ret < 0)
2838 		return ret;
2839 
2840 	/* Stop the MAC transmitter */
2841 
2842 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2843 	if (ret < 0)
2844 		return ret;
2845 
2846 	return 0;
2847 }
2848 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	/* assert the Tx FIFO reset bit to discard any queued data */
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2856 
2857 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2858 {
2859 	int ret;
2860 
2861 	netif_dbg(dev, drv, dev->net, "start rx path");
2862 
2863 	/* Start the Rx FIFO */
2864 
2865 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2866 	if (ret < 0)
2867 		return ret;
2868 
2869 	/* Start the MAC receiver*/
2870 
2871 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2872 	if (ret < 0)
2873 		return ret;
2874 
2875 	return 0;
2876 }
2877 
2878 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2879 {
2880 	int ret;
2881 
2882 	netif_dbg(dev, drv, dev->net, "stop rx path");
2883 
2884 	/* Stop the MAC receiver */
2885 
2886 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2887 	if (ret < 0)
2888 		return ret;
2889 
2890 	/* Stop the Rx FIFO */
2891 
2892 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2893 	if (ret < 0)
2894 		return ret;
2895 
2896 	return 0;
2897 }
2898 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	/* assert the Rx FIFO reset bit to discard any queued data */
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2906 
/* Perform a LiteReset of the device and bring it back to its default
 * operating state: MAC address, USB/bulk-in tuning, FIFO sizes, flow
 * control off, receive-filter defaults, checksum offload, multicast
 * filters, PHY reset and maximum Rx frame length.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* wait (up to 1s) for the LiteReset bit to clear */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and bulk-in delay were chosen per USB speed in
	 * lan78xx_urb_config_init()
	 */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes (register unit is 512-byte blocks) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any latched interrupts and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait (up to 1s) for PHY reset to clear and device to be ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3085 
3086 static void lan78xx_init_stats(struct lan78xx_net *dev)
3087 {
3088 	u32 *p;
3089 	int i;
3090 
3091 	/* initialize for stats update
3092 	 * some counters are 20bits and some are 32bits
3093 	 */
3094 	p = (u32 *)&dev->stats.rollover_max;
3095 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3096 		p[i] = 0xFFFFF;
3097 
3098 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3099 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3100 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3101 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3102 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3103 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3104 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3105 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3106 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3107 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3108 
3109 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3110 }
3111 
/* ndo_open handler: bring the interface up.
 *
 * Starts the PHY, submits the interrupt URB used for link-change
 * notification, flushes and starts the Tx/Rx data paths, arms the
 * statistics machinery and enables NAPI. Returns 0 on success or a
 * negative error code.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			/* NOTE(review): this and the following error paths
			 * leave the PHY started - confirm whether phy_stop()
			 * should be called before bailing out.
			 */
			goto done;
		}
	}

	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	napi_enable(&dev->napi);

	/* kick the link-state machinery once everything is running */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
3171 
/* Cancel all in-flight Tx/Rx URBs and wait (uninterruptibly) until the
 * txq and rxq drain, then release any buffers still sitting on the Rx
 * done, Rx overflow and Tx pending queues. dev->wait is published so
 * the completion path can wake this waiter as URBs finish.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3207 
/* ndo_stop handler: bring the interface down.
 *
 * Stops the stats timer, quiesces the queue and NAPI, cancels all
 * in-flight URBs, shuts down the Tx/Rx data paths and the PHY, kills
 * the interrupt URB and neutralises deferred work. Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* drop the PM reference taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3256 
/* Move @skb from @list to the rxq_done queue, setting its new @state,
 * and schedule NAPI on the empty-to-non-empty transition.
 *
 * Note the lock handoff: interrupts are disabled (flags saved) while
 * taking list->lock; list->lock is released WITHOUT restoring flags,
 * rxq_done.lock is taken, and the saved flags are only restored when
 * that second lock is dropped - so irqs stay off across both critical
 * sections.
 *
 * Returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* only schedule NAPI when the queue was previously empty */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3280 
/* Completion handler for Tx URBs.
 *
 * Updates Tx statistics, translates USB error statuses into recovery
 * actions (EVENT_TX_HALT on a stalled endpoint, queue stop on link or
 * protocol errors), releases the Tx buffer, and re-schedules NAPI if
 * more Tx data is pending with no URBs in flight.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled - defer recovery to keventd */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient bus trouble - pause the Tx queue */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	/* releases the PM reference taken when the URB was submitted */
	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3334 
3335 static void lan78xx_queue_skb(struct sk_buff_head *list,
3336 			      struct sk_buff *newsk, enum skb_state state)
3337 {
3338 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3339 
3340 	__skb_queue_tail(list, newsk);
3341 	entry->state = state;
3342 }
3343 
3344 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3345 {
3346 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3347 }
3348 
3349 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3350 {
3351 	return dev->tx_pend_data_len;
3352 }
3353 
3354 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3355 				    struct sk_buff *skb,
3356 				    unsigned int *tx_pend_data_len)
3357 {
3358 	unsigned long flags;
3359 
3360 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3361 
3362 	__skb_queue_tail(&dev->txq_pend, skb);
3363 
3364 	dev->tx_pend_data_len += skb->len;
3365 	*tx_pend_data_len = dev->tx_pend_data_len;
3366 
3367 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3368 }
3369 
3370 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3371 					 struct sk_buff *skb,
3372 					 unsigned int *tx_pend_data_len)
3373 {
3374 	unsigned long flags;
3375 
3376 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3377 
3378 	__skb_queue_head(&dev->txq_pend, skb);
3379 
3380 	dev->tx_pend_data_len += skb->len;
3381 	*tx_pend_data_len = dev->tx_pend_data_len;
3382 
3383 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3384 }
3385 
3386 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3387 				    struct sk_buff **skb,
3388 				    unsigned int *tx_pend_data_len)
3389 {
3390 	unsigned long flags;
3391 
3392 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3393 
3394 	*skb = __skb_dequeue(&dev->txq_pend);
3395 	if (*skb)
3396 		dev->tx_pend_data_len -= (*skb)->len;
3397 	*tx_pend_data_len = dev->tx_pend_data_len;
3398 
3399 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3400 }
3401 
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* If the device is asleep, kick the deferred work so the
	 * interface can be resumed before the data goes out.
	 */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	/* Queue the SKB; actual URB filling/submission happens in NAPI
	 * context (lan78xx_tx_bh), so this handler never blocks on USB.
	 */
	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	/* SKB ownership is always taken; this never returns TX_BUSY */
	return NETDEV_TX_OK;
}
3437 
3438 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3439 {
3440 	struct lan78xx_priv *pdata = NULL;
3441 	int ret;
3442 	int i;
3443 
3444 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3445 
3446 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3447 	if (!pdata) {
3448 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3449 		return -ENOMEM;
3450 	}
3451 
3452 	pdata->dev = dev;
3453 
3454 	spin_lock_init(&pdata->rfe_ctl_lock);
3455 	mutex_init(&pdata->dataport_mutex);
3456 
3457 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3458 
3459 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3460 		pdata->vlan_table[i] = 0;
3461 
3462 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3463 
3464 	dev->net->features = 0;
3465 
3466 	if (DEFAULT_TX_CSUM_ENABLE)
3467 		dev->net->features |= NETIF_F_HW_CSUM;
3468 
3469 	if (DEFAULT_RX_CSUM_ENABLE)
3470 		dev->net->features |= NETIF_F_RXCSUM;
3471 
3472 	if (DEFAULT_TSO_CSUM_ENABLE)
3473 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3474 
3475 	if (DEFAULT_VLAN_RX_OFFLOAD)
3476 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3477 
3478 	if (DEFAULT_VLAN_FILTER_ENABLE)
3479 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3480 
3481 	dev->net->hw_features = dev->net->features;
3482 
3483 	ret = lan78xx_setup_irq_domain(dev);
3484 	if (ret < 0) {
3485 		netdev_warn(dev->net,
3486 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3487 		goto out1;
3488 	}
3489 
3490 	/* Init all registers */
3491 	ret = lan78xx_reset(dev);
3492 	if (ret) {
3493 		netdev_warn(dev->net, "Registers INIT FAILED....");
3494 		goto out2;
3495 	}
3496 
3497 	ret = lan78xx_mdio_init(dev);
3498 	if (ret) {
3499 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3500 		goto out2;
3501 	}
3502 
3503 	dev->net->flags |= IFF_MULTICAST;
3504 
3505 	pdata->wol = WAKE_MAGIC;
3506 
3507 	return ret;
3508 
3509 out2:
3510 	lan78xx_remove_irq_domain(dev);
3511 
3512 out1:
3513 	netdev_warn(dev->net, "Bind routine FAILED");
3514 	cancel_work_sync(&pdata->set_multicast);
3515 	cancel_work_sync(&pdata->set_vlan);
3516 	kfree(pdata);
3517 	return ret;
3518 }
3519 
3520 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3521 {
3522 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3523 
3524 	lan78xx_remove_irq_domain(dev);
3525 
3526 	lan78xx_remove_mdio(dev);
3527 
3528 	if (pdata) {
3529 		cancel_work_sync(&pdata->set_multicast);
3530 		cancel_work_sync(&pdata->set_vlan);
3531 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3532 		kfree(pdata);
3533 		pdata = NULL;
3534 		dev->data[0] = 0;
3535 	}
3536 }
3537 
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* hardware checksum is carried in the upper half of Rx
		 * command word B
		 */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3555 
3556 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3557 				    struct sk_buff *skb,
3558 				    u32 rx_cmd_a, u32 rx_cmd_b)
3559 {
3560 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3561 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3562 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3563 				       (rx_cmd_b & 0xffff));
3564 }
3565 
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* Account a received frame and hand it to the stack via GRO.
	 * Runs in NAPI context.
	 */
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	/* eth_type_trans() consumed the Ethernet header, hence the
	 * sizeof(struct ethhdr) correction in the reported length
	 */
	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3582 
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	/* Parse a completed Rx URB buffer into individual Ethernet frames.
	 * Returns 0 if the buffer is rejected (too short, or SKB
	 * allocation failed mid-way), 1 otherwise.
	 */
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		/* each frame is preceded by three little-endian command
		 * words (A, B, C) written by the hardware
		 */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* NOTE(review): 'size' comes from the device descriptor
		 * and is not validated against the remaining skb->len
		 * before the copy/pull below — assumes the hardware
		 * reports consistent frame lengths; confirm.
		 */
		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* hardware appends the FCS; do not copy it */
			u32 frame_len = size - ETH_FCS_LEN;
			struct sk_buff *skb2;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3652 
3653 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3654 			      int budget, int *work_done)
3655 {
3656 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3657 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3658 		dev->net->stats.rx_errors++;
3659 	}
3660 }
3661 
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	/* Rx URB completion handler: classify the completion status and
	 * hand the buffer to NAPI through the done queue (defer_bh).
	 * Buffers marked rx_cleanup are later resubmitted without frame
	 * processing.
	 */
	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* runt transfers are counted as length errors */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: recovery runs in deferred work */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
3721 
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	/* Submit the pre-allocated Rx URB associated with @skb. On any
	 * failure the buffer is returned to the free pool. Returns 0 on
	 * success or a negative errno (-ENOLINK when the interface is
	 * not ready to receive).
	 */
	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock is held across the submit so the queued state and the
	 * in-flight URB stay consistent
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it in deferred work */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* failed submissions give the buffer back to the free pool */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
3773 
3774 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3775 {
3776 	struct sk_buff *rx_buf;
3777 
3778 	/* Ensure the maximum number of Rx URBs is submitted
3779 	 */
3780 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3781 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3782 			break;
3783 	}
3784 }
3785 
3786 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3787 				    struct sk_buff *rx_buf)
3788 {
3789 	/* reset SKB data pointers */
3790 
3791 	rx_buf->data = rx_buf->head;
3792 	skb_reset_tail_pointer(rx_buf);
3793 	rx_buf->len = 0;
3794 	rx_buf->data_len = 0;
3795 
3796 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3797 }
3798 
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	/* Build the two little-endian Tx command words written ahead of
	 * each frame in the URB buffer: frame length, FCS generation,
	 * checksum offload, LSO/MSS and VLAN tag insertion.
	 */
	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp the MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}
3826 
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	/* Pack as many pending SKBs as fit into the URB buffer @tx_buf,
	 * each preceded by its command words and 4-byte aligned. Returns
	 * the buffer's skb_data entry with num_of_packet/length filled in.
	 */
	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad to the next 4-byte boundary before the cmd words */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* does not fit: push it back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* copy failed: drop the SKB and rewind past the
			 * already-written command words
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* GSO SKBs count as their number of segments */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
3892 
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* NAPI-context Tx bottom half: wake the stack queue when URB
	 * space is available, then pack pending SKBs into URB buffers
	 * and submit them.
	 */

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: halt-clear in deferred work */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		/* the 'out' label sits inside this block so the PM-get
		 * failure path also accounts the drop and frees the buffer
		 */
		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
3985 
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Main NAPI bottom half: drain the overflow and done queues,
	 * resubmit Rx URBs and kick the Tx path. Returns the number of
	 * frames delivered (may exceed @budget only via the overflow
	 * drain below).
	 */

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* error completion: resubmit without processing */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4061 
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* NAPI poll callback. Returns the work done, or the full budget
	 * to keep polling when the cycle could not complete.
	 */

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				/* a stopped stack queue with no pending
				 * work means Tx completed meanwhile; wake
				 * it and poll once more
				 */
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4105 
4106 static void lan78xx_delayedwork(struct work_struct *work)
4107 {
4108 	int status;
4109 	struct lan78xx_net *dev;
4110 
4111 	dev = container_of(work, struct lan78xx_net, wq.work);
4112 
4113 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4114 		return;
4115 
4116 	if (usb_autopm_get_interface(dev->intf) < 0)
4117 		return;
4118 
4119 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4120 		unlink_urbs(dev, &dev->txq);
4121 
4122 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4123 		if (status < 0 &&
4124 		    status != -EPIPE &&
4125 		    status != -ESHUTDOWN) {
4126 			if (netif_msg_tx_err(dev))
4127 				netdev_err(dev->net,
4128 					   "can't clear tx halt, status %d\n",
4129 					   status);
4130 		} else {
4131 			clear_bit(EVENT_TX_HALT, &dev->flags);
4132 			if (status != -ESHUTDOWN)
4133 				netif_wake_queue(dev->net);
4134 		}
4135 	}
4136 
4137 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4138 		unlink_urbs(dev, &dev->rxq);
4139 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4140 		if (status < 0 &&
4141 		    status != -EPIPE &&
4142 		    status != -ESHUTDOWN) {
4143 			if (netif_msg_rx_err(dev))
4144 				netdev_err(dev->net,
4145 					   "can't clear rx halt, status %d\n",
4146 					   status);
4147 		} else {
4148 			clear_bit(EVENT_RX_HALT, &dev->flags);
4149 			napi_schedule(&dev->napi);
4150 		}
4151 	}
4152 
4153 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4154 		int ret = 0;
4155 
4156 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4157 		if (lan78xx_link_reset(dev) < 0) {
4158 			netdev_info(dev->net, "link reset failed (%d)\n",
4159 				    ret);
4160 		}
4161 	}
4162 
4163 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4164 		lan78xx_update_stats(dev);
4165 
4166 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4167 
4168 		mod_timer(&dev->stat_monitor,
4169 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4170 
4171 		dev->delta = min((dev->delta * 2), 50);
4172 	}
4173 
4174 	usb_autopm_put_interface(dev->intf);
4175 }
4176 
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	/* Interrupt (status) endpoint completion handler: process the
	 * status report, then resubmit the URB unless the interface is
	 * shutting down.
	 */
	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear the buffer so a stale status is never re-processed */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4228 
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	/* USB disconnect handler: tear everything down in the reverse
	 * order of probe. EVENT_DEV_DISCONNECT stops the deferred work
	 * from doing further device I/O.
	 */
	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* NOTE(review): phydev is captured before phy_disconnect() —
	 * presumably because that call may clear net->phydev; confirm.
	 */
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* deferred Tx URBs anchored during autosuspend are discarded */
	usb_scuttle_anchored_urbs(&dev->deferred);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4278 
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* Watchdog: cancel in-flight Tx URBs and let the NAPI handler
	 * restart transmission from the pending queue.
	 */
	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4286 
4287 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4288 						struct net_device *netdev,
4289 						netdev_features_t features)
4290 {
4291 	struct lan78xx_net *dev = netdev_priv(netdev);
4292 
4293 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4294 		features &= ~NETIF_F_GSO_MASK;
4295 
4296 	features = vlan_features_check(skb, features);
4297 	features = vxlan_features_check(skb, features);
4298 
4299 	return features;
4300 }
4301 
/* net_device entry points; Tx work in .ndo_start_xmit only queues data,
 * with URB submission deferred to NAPI context (lan78xx_tx_bh).
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4317 
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	/* Timer context: defer the statistics refresh (which needs USB
	 * register access) to the driver work queue.
	 */
	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4324 
4325 static int lan78xx_probe(struct usb_interface *intf,
4326 			 const struct usb_device_id *id)
4327 {
4328 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4329 	struct lan78xx_net *dev;
4330 	struct net_device *netdev;
4331 	struct usb_device *udev;
4332 	int ret;
4333 	unsigned int maxp;
4334 	unsigned int period;
4335 	u8 *buf = NULL;
4336 
4337 	udev = interface_to_usbdev(intf);
4338 	udev = usb_get_dev(udev);
4339 
4340 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4341 	if (!netdev) {
4342 		dev_err(&intf->dev, "Error: OOM\n");
4343 		ret = -ENOMEM;
4344 		goto out1;
4345 	}
4346 
4347 	/* netdev_printk() needs this */
4348 	SET_NETDEV_DEV(netdev, &intf->dev);
4349 
4350 	dev = netdev_priv(netdev);
4351 	dev->udev = udev;
4352 	dev->intf = intf;
4353 	dev->net = netdev;
4354 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4355 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4356 
4357 	skb_queue_head_init(&dev->rxq);
4358 	skb_queue_head_init(&dev->txq);
4359 	skb_queue_head_init(&dev->rxq_done);
4360 	skb_queue_head_init(&dev->txq_pend);
4361 	skb_queue_head_init(&dev->rxq_overflow);
4362 	mutex_init(&dev->phy_mutex);
4363 	mutex_init(&dev->dev_mutex);
4364 
4365 	ret = lan78xx_urb_config_init(dev);
4366 	if (ret < 0)
4367 		goto out2;
4368 
4369 	ret = lan78xx_alloc_tx_resources(dev);
4370 	if (ret < 0)
4371 		goto out2;
4372 
4373 	ret = lan78xx_alloc_rx_resources(dev);
4374 	if (ret < 0)
4375 		goto out3;
4376 
4377 	/* MTU range: 68 - 9000 */
4378 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4379 
4380 	netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4381 
4382 	netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
4383 
4384 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4385 	init_usb_anchor(&dev->deferred);
4386 
4387 	netdev->netdev_ops = &lan78xx_netdev_ops;
4388 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4389 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4390 
4391 	dev->delta = 1;
4392 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4393 
4394 	mutex_init(&dev->stats.access_lock);
4395 
4396 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4397 		ret = -ENODEV;
4398 		goto out4;
4399 	}
4400 
4401 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4402 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4403 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4404 		ret = -ENODEV;
4405 		goto out4;
4406 	}
4407 
4408 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4409 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4410 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4411 		ret = -ENODEV;
4412 		goto out4;
4413 	}
4414 
4415 	ep_intr = &intf->cur_altsetting->endpoint[2];
4416 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4417 		ret = -ENODEV;
4418 		goto out4;
4419 	}
4420 
4421 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4422 					usb_endpoint_num(&ep_intr->desc));
4423 
4424 	ret = lan78xx_bind(dev, intf);
4425 	if (ret < 0)
4426 		goto out4;
4427 
4428 	period = ep_intr->desc.bInterval;
4429 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
4430 	buf = kmalloc(maxp, GFP_KERNEL);
4431 	if (!buf) {
4432 		ret = -ENOMEM;
4433 		goto out5;
4434 	}
4435 
4436 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4437 	if (!dev->urb_intr) {
4438 		ret = -ENOMEM;
4439 		goto out6;
4440 	} else {
4441 		usb_fill_int_urb(dev->urb_intr, dev->udev,
4442 				 dev->pipe_intr, buf, maxp,
4443 				 intr_complete, dev, period);
4444 		dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4445 	}
4446 
4447 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4448 
4449 	/* Reject broken descriptors. */
4450 	if (dev->maxpacket == 0) {
4451 		ret = -ENODEV;
4452 		goto out6;
4453 	}
4454 
4455 	/* driver requires remote-wakeup capability during autosuspend. */
4456 	intf->needs_remote_wakeup = 1;
4457 
4458 	ret = lan78xx_phy_init(dev);
4459 	if (ret < 0)
4460 		goto out7;
4461 
4462 	ret = register_netdev(netdev);
4463 	if (ret != 0) {
4464 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4465 		goto out8;
4466 	}
4467 
4468 	usb_set_intfdata(intf, dev);
4469 
4470 	ret = device_set_wakeup_enable(&udev->dev, true);
4471 
4472 	 /* Default delay of 2sec has more overhead than advantage.
4473 	  * Set to 10sec as default.
4474 	  */
4475 	pm_runtime_set_autosuspend_delay(&udev->dev,
4476 					 DEFAULT_AUTOSUSPEND_DELAY);
4477 
4478 	return 0;
4479 
4480 out8:
4481 	phy_disconnect(netdev->phydev);
4482 out7:
4483 	usb_free_urb(dev->urb_intr);
4484 out6:
4485 	kfree(buf);
4486 out5:
4487 	lan78xx_unbind(dev, intf);
4488 out4:
4489 	netif_napi_del(&dev->napi);
4490 	lan78xx_free_rx_resources(dev);
4491 out3:
4492 	lan78xx_free_tx_resources(dev);
4493 out2:
4494 	free_netdev(netdev);
4495 out1:
4496 	usb_put_dev(udev);
4497 
4498 	return ret;
4499 }
4500 
/* Compute the 16-bit wakeup-frame checksum used by the LAN78xx wakeup
 * filter hardware: bit-serial, data consumed LSB first, polynomial 0x8005,
 * initial value 0xFFFF.  Note this is the chip-specific variant (bit 0 is
 * forced to 1 on every polynomial reduction), not a standard CRC-16.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 fcs = 0xFFFF;
	int pos;

	for (pos = 0; pos < len; pos++) {
		u8 byte = buf[pos];
		int nbits;

		for (nbits = 0; nbits < 8; nbits++) {
			u16 top = fcs >> 15;

			fcs <<= 1;
			if (top ^ (u16)(byte & 1)) {
				fcs ^= poly;
				fcs |= (u16)0x0001U;
			}
			byte >>= 1;
		}
	}

	return fcs;
}
4525 
/* Program the device for USB autosuspend (selective suspend): stop both
 * data paths, clear stale wakeup control/status, arm good-frame
 * (receive-filter) wakeup with frame storage, and enter suspend mode 3
 * with PHY and WOL wake events enabled.  Rx is restarted at the end so a
 * wake frame can actually be detected while suspended.
 *
 * Returns 0 on success or a negative errno from the first failing
 * register access; the register sequence is order-sensitive.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* reset wakeup control/status and recorded wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	/* enable PHY/WOL wake events and select suspend mode 3 */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* keep Rx running so wakeup frames can be received */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4593 
/* Program the device for system suspend with the Wake-on-LAN events
 * selected in @wol (bitmask of WAKE_PHY / WAKE_MAGIC / WAKE_BCAST /
 * WAKE_MCAST / WAKE_UCAST / WAKE_ARP): stop both data paths, clear old
 * wakeup state, build the wakeup-frame filters needed for multicast and
 * ARP wake, choose a suspend mode that can still detect the requested
 * events, and restart Rx so wake frames can be received.
 *
 * Returns 0 on success or a negative errno from the first failing
 * register access; the register sequence is order-sensitive.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* match prefixes: 01:00:5E (IPv4 mcast), 33:33 (IPv6 mcast),
	 * and EtherType 0x0806 (ARP)
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* reset wakeup control/status and recorded wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filter slots before reprogramming */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x7: compare the first 3 bytes (dest MAC OUI) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x3: compare the first 2 bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* byte mask 0x3000: compare bytes 12-13 (EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* keep Rx running so wakeup frames can be received */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4790 
/* USB PM suspend callback.  Under dev->dev_mutex:
 *  - if the interface is open, refuse autosuspend (-EBUSY) while Tx work
 *    is pending, otherwise quiesce Rx/Tx, kill outstanding URBs and enter
 *    either selective-suspend (runtime PM) or WOL suspend (system sleep);
 *  - if the interface is closed, disable all wake sources and put the
 *    device in suspend mode 3.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep while still holding txq.lock so new
			 * Tx activity sees the flag
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* runtime PM: wake on any good frame */
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			/* system sleep: wake only on configured WOL events */
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* clear WUPS */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4900 
4901 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4902 {
4903 	bool pipe_halted = false;
4904 	struct urb *urb;
4905 
4906 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4907 		struct sk_buff *skb = urb->context;
4908 		int ret;
4909 
4910 		if (!netif_device_present(dev->net) ||
4911 		    !netif_carrier_ok(dev->net) ||
4912 		    pipe_halted) {
4913 			lan78xx_release_tx_buf(dev, skb);
4914 			continue;
4915 		}
4916 
4917 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4918 
4919 		if (ret == 0) {
4920 			netif_trans_update(dev->net);
4921 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4922 		} else {
4923 			if (ret == -EPIPE) {
4924 				netif_stop_queue(dev->net);
4925 				pipe_halted = true;
4926 			} else if (ret == -ENODEV) {
4927 				netif_device_detach(dev->net);
4928 			}
4929 
4930 			lan78xx_release_tx_buf(dev, skb);
4931 		}
4932 	}
4933 
4934 	return pipe_halted;
4935 }
4936 
/* USB PM resume callback.  Under dev->dev_mutex: if the interface is open,
 * flush the Tx FIFO, resubmit the interrupt URB and any deferred Tx URBs,
 * clear EVENT_DEV_ASLEEP, restart the Tx path, kick NAPI and the stats
 * timer; then (open or not) clear all wakeup status/enable state so normal
 * operation resumes.  Returns 0 on success or a negative errno.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			/* inner ret shadows the outer one: an intr-URB
			 * submit failure is only logged and does not abort
			 * the resume
			 */
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* clear asleep while still holding txq.lock so Tx paths
		 * see a consistent state
		 */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* clear wakeup enables and recorded wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* clear latched wake-event status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5033 
5034 static int lan78xx_reset_resume(struct usb_interface *intf)
5035 {
5036 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5037 	int ret;
5038 
5039 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5040 
5041 	ret = lan78xx_reset(dev);
5042 	if (ret < 0)
5043 		return ret;
5044 
5045 	phy_start(dev->net->phydev);
5046 
5047 	ret = lan78xx_resume(intf);
5048 
5049 	return ret;
5050 }
5051 
/* USB vendor/product IDs of the devices handled by this driver. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
5072 
/* USB driver registration: hooks probe/disconnect and the three PM entry
 * points, opts in to runtime autosuspend, and disables hub-initiated LPM.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5090