// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

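/* Note: the order of these strings must match the member order of
 * struct lan78xx_statstage below; ethtool statistics are copied out
 * positionally in lan78xx_get_stats().
 */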
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

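/* Read a 32-bit device register over the USB control pipe. The value
 * arrives little-endian; a heap buffer is used because USB transfer
 * buffers must be DMA-safe (stack buffers are not allowed), hence the
 * kmalloc round trip for a single u32.
 */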
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

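/* Write a 32-bit device register over the USB control pipe; the value
 * is byte-swapped to little-endian in a DMA-safe heap buffer before
 * the transfer.
 */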
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

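/* The hardware statistics counters are 32 bits wide and wrap. A new
 * snapshot smaller than the saved one indicates a wrap, which is
 * accumulated here so lan78xx_update_stats() can rebuild 64-bit
 * totals.
 */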
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

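	/* Rebuild each 64-bit total from the latest 32-bit snapshot plus
	 * (rollover count) * (counter range), walking the stat structs as
	 * flat u32/u64 arrays.
	 */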
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout. Called with phy_mutex held. */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

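/* Compose an MII_ACC register value from the PHY address, register
 * index and direction; setting MII_ACC_MII_BUSY_ starts the MII cycle.
 */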
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and then restore the LED function to access
	 * the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit; /* restore the LED configuration on early exit */

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and then restore the LED function to access
	 * the EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

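/* Write a block of u32 words into one of the internal RAMs (selected
 * by ram_select) through the DP_SEL/DP_ADDR/DP_DATA/DP_CMD dataport,
 * waiting for DP_SEL_DPRDY_ between words.
 */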
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address: the upper 9 bits of
 * the Ethernet CRC select one of the 512 hash-table bits
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

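/* Runs from the set_multicast work item: pushes the cached multicast
 * hash table through the dataport and reprograms the perfect-filter
 * and RFE_CTL registers, which must be written from sleepable context.
 */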
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] holds the device's own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

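	/* Enable TX pause with the maximum pause time (0xFFFF quanta);
	 * FCT_FLOW below holds fixed FIFO flow-control thresholds chosen
	 * per USB link speed.
	 */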
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

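/* Handle a PHY-signalled link change: on link-down, reset the MAC and
 * stop the stats timer; on link-up, reconfigure USB LPM (U1/U2) for
 * the negotiated speed, resolve flow control from the advertisement
 * registers and restart the stats monitor.
 */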
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

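/* Interrupt endpoint completion handler: the device delivers a 4-byte
 * little-endian status word. A PHY interrupt defers the link reset to
 * keventd and, when an IRQ domain is registered, re-dispatches the
 * mapped PHY IRQ.
 */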
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (wol->wolopts & ~WAKE_ALL) {
		/* release the autopm reference taken above before bailing */
		usb_autopm_put_interface(dev->intf);
		return -EINVAL;
	}

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same microsecond unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

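/* MAC address selection order: the RX_ADDR registers (if already
 * programmed with a valid address), then device tree / platform data,
 * then EEPROM or OTP, and finally a random address as last resort.
 */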
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* In forced 100 F/H mode, the chip may fail to set the speed
	 * correctly when the cable is switched between a long (~50+ m) and
	 * a short one. As a workaround, set the speed to 10 before setting
	 * it to 100 while in forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

1932 	/* Do the register access here because irq_bus_lock and
1933 	 * irq_bus_sync_unlock are the only callbacks run in non-atomic context.
1934 	 */
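	/* lan78xx_irq_mask/unmask above only update the cached
	 * data->irqenable; the INT_EP_CTL read/write below pushes the
	 * accumulated state to the device.
	 */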
1935 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1936 	if (buf != data->irqenable)
1937 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1938 
1939 	mutex_unlock(&data->irq_lock);
1940 }
1941 
1942 static struct irq_chip lan78xx_irqchip = {
1943 	.name			= "lan78xx-irqs",
1944 	.irq_mask		= lan78xx_irq_mask,
1945 	.irq_unmask		= lan78xx_irq_unmask,
1946 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1947 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1948 };
1949 
1950 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1951 {
1952 	struct device_node *of_node;
1953 	struct irq_domain *irqdomain;
1954 	unsigned int irqmap = 0;
1955 	u32 buf;
1956 	int ret = 0;
1957 
1958 	of_node = dev->udev->dev.parent->of_node;
1959 
1960 	mutex_init(&dev->domain_data.irq_lock);
1961 
1962 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1963 	dev->domain_data.irqenable = buf;
1964 
1965 	dev->domain_data.irqchip = &lan78xx_irqchip;
1966 	dev->domain_data.irq_handler = handle_simple_irq;
1967 
1968 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1969 					  &chip_domain_ops, &dev->domain_data);
1970 	if (irqdomain) {
1971 		/* create mapping for PHY interrupt */
1972 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1973 		if (!irqmap) {
1974 			irq_domain_remove(irqdomain);
1975 
1976 			irqdomain = NULL;
1977 			ret = -EINVAL;
1978 		}
1979 	} else {
1980 		ret = -EINVAL;
1981 	}
1982 
1983 	dev->domain_data.irqdomain = irqdomain;
1984 	dev->domain_data.phyirq = irqmap;
1985 
1986 	return ret;
1987 }
1988 
1989 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1990 {
1991 	if (dev->domain_data.phyirq > 0) {
1992 		irq_dispose_mapping(dev->domain_data.phyirq);
1993 
1994 		if (dev->domain_data.irqdomain)
1995 			irq_domain_remove(dev->domain_data.irqdomain);
1996 	}
1997 	dev->domain_data.phyirq = 0;
1998 	dev->domain_data.irqdomain = NULL;
1999 }
2000 
2001 static int lan8835_fixup(struct phy_device *phydev)
2002 {
2003 	int buf;
2004 	int ret;
2005 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2006 
2007 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2008 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2009 	buf &= ~0x1800;
2010 	buf |= 0x0800;
2011 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2012 
2013 	/* RGMII MAC TXC Delay Enable */
2014 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2015 				MAC_RGMII_ID_TXC_DELAY_EN_);
2016 
2017 	/* RGMII TX DLL Tune Adjust */
2018 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2019 
2020 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2021 
2022 	return 1;
2023 }
2024 
2025 static int ksz9031rnx_fixup(struct phy_device *phydev)
2026 {
2027 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2028 
2029 	/* Micrel KSZ9031RNX PHY configuration */
2030 	/* RGMII Control Signal Pad Skew */
2031 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2032 	/* RGMII RX Data Pad Skew */
2033 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2034 	/* RGMII RX Clock Pad Skew */
2035 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2036 
2037 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2038 
2039 	return 1;
2040 }
2041 
2042 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2043 {
2044 	u32 buf;
2045 	int ret;
2046 	struct fixed_phy_status fphy_status = {
2047 		.link = 1,
2048 		.speed = SPEED_1000,
2049 		.duplex = DUPLEX_FULL,
2050 	};
2051 	struct phy_device *phydev;
2052 
2053 	phydev = phy_find_first(dev->mdiobus);
2054 	if (!phydev) {
2055 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2056 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2057 		if (IS_ERR(phydev)) {
2058 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2059 			return NULL;
2060 		}
2061 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2062 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2063 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2064 					MAC_RGMII_ID_TXC_DELAY_EN_);
2065 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2066 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2067 		buf |= HW_CFG_CLK125_EN_;
2068 		buf |= HW_CFG_REFCLK25_EN_;
2069 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2070 	} else {
2071 		if (!phydev->drv) {
2072 			netdev_err(dev->net, "no PHY driver found\n");
2073 			return NULL;
2074 		}
2075 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2076 		/* external PHY fixup for KSZ9031RNX */
2077 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2078 						 ksz9031rnx_fixup);
2079 		if (ret < 0) {
2080 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2081 			return NULL;
2082 		}
2083 		/* external PHY fixup for LAN8835 */
2084 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2085 						 lan8835_fixup);
2086 		if (ret < 0) {
2087 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2088 			return NULL;
2089 		}
2090 		/* add more external PHY fixup here if needed */
2091 
2092 		phydev->is_internal = false;
2093 	}
2094 	return phydev;
2095 }
2096 
2097 static int lan78xx_phy_init(struct lan78xx_net *dev)
2098 {
2099 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2100 	int ret;
2101 	u32 mii_adv;
2102 	struct phy_device *phydev;
2103 
2104 	switch (dev->chipid) {
2105 	case ID_REV_CHIP_ID_7801_:
2106 		phydev = lan7801_phy_init(dev);
2107 		if (!phydev) {
2108 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2109 			return -EIO;
2110 		}
2111 		break;
2112 
2113 	case ID_REV_CHIP_ID_7800_:
2114 	case ID_REV_CHIP_ID_7850_:
2115 		phydev = phy_find_first(dev->mdiobus);
2116 		if (!phydev) {
2117 			netdev_err(dev->net, "no PHY found\n");
2118 			return -EIO;
2119 		}
2120 		phydev->is_internal = true;
2121 		dev->interface = PHY_INTERFACE_MODE_GMII;
2122 		break;
2123 
2124 	default:
2125 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2126 		return -EIO;
2127 	}
2128 
2129 	/* if phyirq is not set, use polling mode in phylib */
2130 	if (dev->domain_data.phyirq > 0)
2131 		phydev->irq = dev->domain_data.phyirq;
2132 	else
2133 		phydev->irq = 0;
2134 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2135 
2136 	/* set to AUTOMDIX */
2137 	phydev->mdix = ETH_TP_MDI_AUTO;
2138 
2139 	ret = phy_connect_direct(dev->net, phydev,
2140 				 lan78xx_link_status_change,
2141 				 dev->interface);
2142 	if (ret) {
2143 		netdev_err(dev->net, "can't attach PHY to %s\n",
2144 			   dev->mdiobus->id);
2145 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2146 			if (phy_is_pseudo_fixed_link(phydev)) {
2147 				fixed_phy_unregister(phydev);
2148 			} else {
2149 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2150 							     0xfffffff0);
2151 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2152 							     0xfffffff0);
2153 			}
2154 		}
2155 		return -EIO;
2156 	}
2157 
2158 	/* MAC doesn't support 1000T Half */
2159 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2160 
2161 	/* support both flow controls */
2162 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2163 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2164 			   phydev->advertising);
2165 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2166 			   phydev->advertising);
2167 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2168 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2169 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2170 
2171 	if (phydev->mdio.dev.of_node) {
2172 		u32 reg;
2173 		int len;
2174 
2175 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2176 						      "microchip,led-modes",
2177 						      sizeof(u32));
2178 		if (len >= 0) {
2179 			/* Ensure the appropriate LEDs are enabled */
2180 			lan78xx_read_reg(dev, HW_CFG, &reg);
2181 			reg &= ~(HW_CFG_LED0_EN_ |
2182 				 HW_CFG_LED1_EN_ |
2183 				 HW_CFG_LED2_EN_ |
2184 				 HW_CFG_LED3_EN_);
2185 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2186 				(len > 1) * HW_CFG_LED1_EN_ |
2187 				(len > 2) * HW_CFG_LED2_EN_ |
2188 				(len > 3) * HW_CFG_LED3_EN_;
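			/* the (len > n) comparisons evaluate to 0 or 1, so
			 * each product enables LEDn only when the DT
			 * property supplies at least n + 1 led-mode entries
			 */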
2189 			lan78xx_write_reg(dev, HW_CFG, reg);
2190 		}
2191 	}
2192 
2193 	genphy_config_aneg(phydev);
2194 
2195 	dev->fc_autoneg = phydev->autoneg;
2196 
2197 	return 0;
2198 }
2199 
2200 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2201 {
2202 	int ret = 0;
2203 	u32 buf;
2204 	bool rxenabled;
2205 
2206 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2207 
2208 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2209 
2210 	if (rxenabled) {
2211 		buf &= ~MAC_RX_RXEN_;
2212 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2213 	}
2214 
2215 	/* add 4 to size for FCS */
2216 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2217 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
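	/* e.g. callers pass mtu + VLAN_ETH_HLEN, so an MTU of 1500 gives
	 * size = 1518 and a programmed maximum of 1522 including FCS
	 */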
2218 
2219 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2220 
2221 	if (rxenabled) {
2222 		buf |= MAC_RX_RXEN_;
2223 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2230 {
2231 	struct sk_buff *skb;
2232 	unsigned long flags;
2233 	int count = 0;
2234 
2235 	spin_lock_irqsave(&q->lock, flags);
2236 	while (!skb_queue_empty(q)) {
2237 		struct skb_data	*entry;
2238 		struct urb *urb;
2239 		int ret;
2240 
2241 		skb_queue_walk(q, skb) {
2242 			entry = (struct skb_data *)skb->cb;
2243 			if (entry->state != unlink_start)
2244 				goto found;
2245 		}
2246 		break;
2247 found:
2248 		entry->state = unlink_start;
2249 		urb = entry->urb;
2250 
2251 		/* Take a reference on the URB so it cannot be freed
2252 		 * while usb_unlink_urb runs; otherwise usb_unlink_urb,
2253 		 * which always races with the .complete handler
2254 		 * (including defer_bh), may trigger a use-after-free
2255 		 * inside usb_unlink_urb.
2256 		 */
2257 		usb_get_urb(urb);
2258 		spin_unlock_irqrestore(&q->lock, flags);
2259 		/* during some PM-driven resume scenarios,
2260 		 * these (async) unlinks complete immediately
2261 		 */
2262 		ret = usb_unlink_urb(urb);
2263 		if (ret != -EINPROGRESS && ret != 0)
2264 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2265 		else
2266 			count++;
2267 		usb_put_urb(urb);
2268 		spin_lock_irqsave(&q->lock, flags);
2269 	}
2270 	spin_unlock_irqrestore(&q->lock, flags);
2271 	return count;
2272 }
2273 
2274 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2275 {
2276 	struct lan78xx_net *dev = netdev_priv(netdev);
2277 	int ll_mtu = new_mtu + netdev->hard_header_len;
2278 	int old_hard_mtu = dev->hard_mtu;
2279 	int old_rx_urb_size = dev->rx_urb_size;
2280 	int ret;
2281 
2282 	/* no second zero-length packet read wanted after mtu-sized packets */
2283 	if ((ll_mtu % dev->maxpacket) == 0)
2284 		return -EDOM;
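	/* e.g. with 512-byte high-speed bulk packets, an ll_mtu that is an
	 * exact multiple of 512 would end every transfer on a packet
	 * boundary and require an extra zero-length packet, hence -EDOM
	 */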
2285 
2286 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2287 
2288 	netdev->mtu = new_mtu;
2289 
2290 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2291 	if (dev->rx_urb_size == old_hard_mtu) {
2292 		dev->rx_urb_size = dev->hard_mtu;
2293 		if (dev->rx_urb_size > old_rx_urb_size) {
2294 			if (netif_running(dev->net)) {
2295 				unlink_urbs(dev, &dev->rxq);
2296 				tasklet_schedule(&dev->bh);
2297 			}
2298 		}
2299 	}
2300 
2301 	return 0;
2302 }
2303 
2304 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2305 {
2306 	struct lan78xx_net *dev = netdev_priv(netdev);
2307 	struct sockaddr *addr = p;
2308 	u32 addr_lo, addr_hi;
2309 	int ret;
2310 
2311 	if (netif_running(netdev))
2312 		return -EBUSY;
2313 
2314 	if (!is_valid_ether_addr(addr->sa_data))
2315 		return -EADDRNOTAVAIL;
2316 
2317 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2318 
2319 	addr_lo = netdev->dev_addr[0] |
2320 		  netdev->dev_addr[1] << 8 |
2321 		  netdev->dev_addr[2] << 16 |
2322 		  netdev->dev_addr[3] << 24;
2323 	addr_hi = netdev->dev_addr[4] |
2324 		  netdev->dev_addr[5] << 8;
2325 
2326 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2327 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2328 
2329 	/* Added to support MAC address changes */
2330 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2331 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2332 
2333 	return 0;
2334 }
2335 
2336 /* Enable or disable Rx checksum offload engines and VLAN stripping/filtering */
2337 static int lan78xx_set_features(struct net_device *netdev,
2338 				netdev_features_t features)
2339 {
2340 	struct lan78xx_net *dev = netdev_priv(netdev);
2341 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2342 	unsigned long flags;
2343 	int ret;
2344 
2345 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2346 
2347 	if (features & NETIF_F_RXCSUM) {
2348 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2349 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2350 	} else {
2351 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2352 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2353 	}
2354 
2355 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2356 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2357 	else
2358 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2359 
2360 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2361 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2362 	else
2363 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2364 
2365 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2366 
2367 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2368 
2369 	return 0;
2370 }
2371 
2372 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2373 {
2374 	struct lan78xx_priv *pdata =
2375 			container_of(param, struct lan78xx_priv, set_vlan);
2376 	struct lan78xx_net *dev = pdata->dev;
2377 
2378 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2379 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2380 }
2381 
2382 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2383 				   __be16 proto, u16 vid)
2384 {
2385 	struct lan78xx_net *dev = netdev_priv(netdev);
2386 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2387 	u16 vid_bit_index;
2388 	u16 vid_dword_index;
2389 
2390 	vid_dword_index = (vid >> 5) & 0x7F;
2391 	vid_bit_index = vid & 0x1F;
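	/* e.g. vid 100: dword index 100 >> 5 = 3, bit index 100 & 0x1F = 4 */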
2392 
2393 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2394 
2395 	/* defer register writes to a sleepable context */
2396 	schedule_work(&pdata->set_vlan);
2397 
2398 	return 0;
2399 }
2400 
2401 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2402 				    __be16 proto, u16 vid)
2403 {
2404 	struct lan78xx_net *dev = netdev_priv(netdev);
2405 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2406 	u16 vid_bit_index;
2407 	u16 vid_dword_index;
2408 
2409 	vid_dword_index = (vid >> 5) & 0x7F;
2410 	vid_bit_index = vid & 0x1F;
2411 
2412 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2413 
2414 	/* defer register writes to a sleepable context */
2415 	schedule_work(&pdata->set_vlan);
2416 
2417 	return 0;
2418 }
2419 
2420 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2421 {
2422 	int ret;
2423 	u32 buf;
2424 	u32 regs[6] = { 0 };
2425 
2426 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2427 	if (buf & USB_CFG1_LTM_ENABLE_) {
2428 		u8 temp[2];
2429 		/* Get values from EEPROM first */
2430 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2431 			if (temp[0] == 24) {
2432 				ret = lan78xx_read_raw_eeprom(dev,
2433 							      temp[1] * 2,
2434 							      24,
2435 							      (u8 *)regs);
2436 				if (ret < 0)
2437 					return;
2438 			}
2439 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2440 			if (temp[0] == 24) {
2441 				ret = lan78xx_read_raw_otp(dev,
2442 							   temp[1] * 2,
2443 							   24,
2444 							   (u8 *)regs);
2445 				if (ret < 0)
2446 					return;
2447 			}
2448 		}
2449 	}
2450 
2451 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2452 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2453 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2454 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2455 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2456 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2457 }
2458 
2459 static int lan78xx_reset(struct lan78xx_net *dev)
2460 {
2461 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2462 	u32 buf;
2463 	int ret = 0;
2464 	unsigned long timeout;
2465 	u8 sig;
2466 
2467 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2468 	buf |= HW_CFG_LRST_;
2469 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2470 
2471 	timeout = jiffies + HZ;
2472 	do {
2473 		mdelay(1);
2474 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2475 		if (time_after(jiffies, timeout)) {
2476 			netdev_warn(dev->net,
2477 				    "timeout on completion of LiteReset");
2478 			return -EIO;
2479 		}
2480 	} while (buf & HW_CFG_LRST_);
2481 
2482 	lan78xx_init_mac_address(dev);
2483 
2484 	/* save DEVID for later use */
2485 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2486 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2487 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2488 
2489 	/* Respond to the IN token with a NAK */
2490 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2491 	buf |= USB_CFG_BIR_;
2492 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2493 
2494 	/* Init LTM */
2495 	lan78xx_init_ltm(dev);
2496 
2497 	if (dev->udev->speed == USB_SPEED_SUPER) {
2498 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2499 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2500 		dev->rx_qlen = 4;
2501 		dev->tx_qlen = 4;
2502 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2503 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2504 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2505 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2506 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2507 	} else {
2508 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2509 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2510 		dev->rx_qlen = 4;
2511 		dev->tx_qlen = 4;
2512 	}
2513 
2514 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2515 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2516 
2517 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2518 	buf |= HW_CFG_MEF_;
2519 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2520 
2521 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2522 	buf |= USB_CFG_BCE_;
2523 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2524 
2525 	/* set FIFO sizes */
2526 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2527 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2528 
2529 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2530 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2531 
2532 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2533 	ret = lan78xx_write_reg(dev, FLOW, 0);
2534 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2535 
2536 	/* Don't need rfe_ctl_lock during initialisation */
2537 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2538 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2539 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2540 
2541 	/* Enable or disable checksum offload engines */
2542 	lan78xx_set_features(dev->net, dev->net->features);
2543 
2544 	lan78xx_set_multicast(dev->net);
2545 
2546 	/* reset PHY */
2547 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2548 	buf |= PMT_CTL_PHY_RST_;
2549 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2550 
2551 	timeout = jiffies + HZ;
2552 	do {
2553 		mdelay(1);
2554 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2555 		if (time_after(jiffies, timeout)) {
2556 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2557 			return -EIO;
2558 		}
2559 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2560 
2561 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2562 	/* LAN7801 only has RGMII mode */
2563 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2564 		buf &= ~MAC_CR_GMII_EN_;
2565 
2566 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2567 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2568 		if (!ret && sig != EEPROM_INDICATOR) {
2569 			/* Implies there is no external EEPROM; set MAC speed */
2570 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2571 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2572 		}
2573 	}
2574 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2575 
2576 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2577 	buf |= MAC_TX_TXEN_;
2578 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2579 
2580 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2581 	buf |= FCT_TX_CTL_EN_;
2582 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2583 
2584 	ret = lan78xx_set_rx_max_frame_length(dev,
2585 					      dev->net->mtu + VLAN_ETH_HLEN);
2586 
2587 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2588 	buf |= MAC_RX_RXEN_;
2589 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2590 
2591 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2592 	buf |= FCT_RX_CTL_EN_;
2593 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2594 
2595 	return 0;
2596 }
2597 
2598 static void lan78xx_init_stats(struct lan78xx_net *dev)
2599 {
2600 	u32 *p;
2601 	int i;
2602 
2603 	/* initialize rollover limits for the stats update;
2604 	 * some counters are 20 bits wide and some are 32 bits
2605 	 */
2606 	p = (u32 *)&dev->stats.rollover_max;
2607 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2608 		p[i] = 0xFFFFF;
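	/* 0xFFFFF is the wrap point of the 20-bit counters; the byte and
	 * LPI counters overridden below are full 32-bit counters
	 */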
2609 
2610 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2611 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2612 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2613 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2614 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2615 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2616 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2617 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2618 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2619 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2620 
2621 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2622 }
2623 
2624 static int lan78xx_open(struct net_device *net)
2625 {
2626 	struct lan78xx_net *dev = netdev_priv(net);
2627 	int ret;
2628 
2629 	ret = usb_autopm_get_interface(dev->intf);
2630 	if (ret < 0)
2631 		goto out;
2632 
2633 	phy_start(net->phydev);
2634 
2635 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2636 
2637 	/* for Link Check */
2638 	if (dev->urb_intr) {
2639 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2640 		if (ret < 0) {
2641 			netif_err(dev, ifup, dev->net,
2642 				  "intr submit %d\n", ret);
2643 			goto done;
2644 		}
2645 	}
2646 
2647 	lan78xx_init_stats(dev);
2648 
2649 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2650 
2651 	netif_start_queue(net);
2652 
2653 	dev->link_on = false;
2654 
2655 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2656 done:
2657 	usb_autopm_put_interface(dev->intf);
2658 
2659 out:
2660 	return ret;
2661 }
2662 
2663 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2664 {
2665 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2666 	DECLARE_WAITQUEUE(wait, current);
2667 	int temp;
2668 
2669 	/* ensure there are no more active urbs */
2670 	add_wait_queue(&unlink_wakeup, &wait);
2671 	set_current_state(TASK_UNINTERRUPTIBLE);
2672 	dev->wait = &unlink_wakeup;
2673 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2674 
2675 	/* maybe wait for deletions to finish. */
2676 	while (!skb_queue_empty(&dev->rxq) &&
2677 	       !skb_queue_empty(&dev->txq) &&
2678 	       !skb_queue_empty(&dev->done)) {
2679 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2680 		set_current_state(TASK_UNINTERRUPTIBLE);
2681 		netif_dbg(dev, ifdown, dev->net,
2682 			  "waited for %d urb completions\n", temp);
2683 	}
2684 	set_current_state(TASK_RUNNING);
2685 	dev->wait = NULL;
2686 	remove_wait_queue(&unlink_wakeup, &wait);
2687 }
2688 
2689 static int lan78xx_stop(struct net_device *net)
2690 {
2691 	struct lan78xx_net *dev = netdev_priv(net);
2692 
2693 	if (timer_pending(&dev->stat_monitor))
2694 		del_timer_sync(&dev->stat_monitor);
2695 
2696 	if (net->phydev)
2697 		phy_stop(net->phydev);
2698 
2699 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2700 	netif_stop_queue(net);
2701 
2702 	netif_info(dev, ifdown, dev->net,
2703 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2704 		   net->stats.rx_packets, net->stats.tx_packets,
2705 		   net->stats.rx_errors, net->stats.tx_errors);
2706 
2707 	lan78xx_terminate_urbs(dev);
2708 
2709 	usb_kill_urb(dev->urb_intr);
2710 
2711 	skb_queue_purge(&dev->rxq_pause);
2712 
2713 	/* deferred work (task, timer, softirq) must also stop.
2714 	 * We can't flush_scheduled_work() until we drop rtnl (later),
2715 	 * or the workers could deadlock; so make the workers a NOP.
2716 	 */
2717 	dev->flags = 0;
2718 	cancel_delayed_work_sync(&dev->wq);
2719 	tasklet_kill(&dev->bh);
2720 
2721 	usb_autopm_put_interface(dev->intf);
2722 
2723 	return 0;
2724 }
2725 
2726 static int lan78xx_linearize(struct sk_buff *skb)
2727 {
2728 	return skb_linearize(skb);
2729 }
2730 
2731 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2732 				       struct sk_buff *skb, gfp_t flags)
2733 {
2734 	u32 tx_cmd_a, tx_cmd_b;
2735 	void *ptr;
2736 
2737 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2738 		dev_kfree_skb_any(skb);
2739 		return NULL;
2740 	}
2741 
2742 	if (lan78xx_linearize(skb) < 0)
2743 		return NULL;
2744 
2745 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2746 
2747 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2748 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2749 
2750 	tx_cmd_b = 0;
2751 	if (skb_is_gso(skb)) {
2752 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2753 
2754 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2755 
2756 		tx_cmd_a |= TX_CMD_A_LSO_;
2757 	}
2758 
2759 	if (skb_vlan_tag_present(skb)) {
2760 		tx_cmd_a |= TX_CMD_A_IVTG_;
2761 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2762 	}
2763 
2764 	ptr = skb_push(skb, 8);
2765 	put_unaligned_le32(tx_cmd_a, ptr);
2766 	put_unaligned_le32(tx_cmd_b, ptr + 4);
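	/* the frame now carries an 8-byte (TX_OVERHEAD) prefix holding
	 * tx_cmd_a and tx_cmd_b, both little-endian
	 */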
2767 
2768 	return skb;
2769 }
2770 
2771 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2772 			       struct sk_buff_head *list, enum skb_state state)
2773 {
2774 	unsigned long flags;
2775 	enum skb_state old_state;
2776 	struct skb_data *entry = (struct skb_data *)skb->cb;
2777 
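	/* note the lock hand-off: interrupts stay disabled from the
	 * irqsave on list->lock until the irqrestore on dev->done.lock,
	 * even though the two locks are taken back to back, not nested
	 */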
2778 	spin_lock_irqsave(&list->lock, flags);
2779 	old_state = entry->state;
2780 	entry->state = state;
2781 
2782 	__skb_unlink(skb, list);
2783 	spin_unlock(&list->lock);
2784 	spin_lock(&dev->done.lock);
2785 
2786 	__skb_queue_tail(&dev->done, skb);
2787 	if (skb_queue_len(&dev->done) == 1)
2788 		tasklet_schedule(&dev->bh);
2789 	spin_unlock_irqrestore(&dev->done.lock, flags);
2790 
2791 	return old_state;
2792 }
2793 
2794 static void tx_complete(struct urb *urb)
2795 {
2796 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2797 	struct skb_data *entry = (struct skb_data *)skb->cb;
2798 	struct lan78xx_net *dev = entry->dev;
2799 
2800 	if (urb->status == 0) {
2801 		dev->net->stats.tx_packets += entry->num_of_packet;
2802 		dev->net->stats.tx_bytes += entry->length;
2803 	} else {
2804 		dev->net->stats.tx_errors++;
2805 
2806 		switch (urb->status) {
2807 		case -EPIPE:
2808 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2809 			break;
2810 
2811 		/* software-driven interface shutdown */
2812 		case -ECONNRESET:
2813 		case -ESHUTDOWN:
2814 			break;
2815 
2816 		case -EPROTO:
2817 		case -ETIME:
2818 		case -EILSEQ:
2819 			netif_stop_queue(dev->net);
2820 			break;
2821 		default:
2822 			netif_dbg(dev, tx_err, dev->net,
2823 				  "tx err %d\n", entry->urb->status);
2824 			break;
2825 		}
2826 	}
2827 
2828 	usb_autopm_put_interface_async(dev->intf);
2829 
2830 	defer_bh(dev, skb, &dev->txq, tx_done);
2831 }
2832 
2833 static void lan78xx_queue_skb(struct sk_buff_head *list,
2834 			      struct sk_buff *newsk, enum skb_state state)
2835 {
2836 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2837 
2838 	__skb_queue_tail(list, newsk);
2839 	entry->state = state;
2840 }
2841 
2842 static netdev_tx_t
2843 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2844 {
2845 	struct lan78xx_net *dev = netdev_priv(net);
2846 	struct sk_buff *skb2 = NULL;
2847 
2848 	if (skb) {
2849 		skb_tx_timestamp(skb);
2850 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2851 	}
2852 
2853 	if (skb2) {
2854 		skb_queue_tail(&dev->txq_pend, skb2);
2855 
2856 		/* throttle the TX path at speeds slower than SuperSpeed USB */
2857 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2858 		    (skb_queue_len(&dev->txq_pend) > 10))
2859 			netif_stop_queue(net);
2860 	} else {
2861 		netif_dbg(dev, tx_err, dev->net,
2862 			  "lan78xx_tx_prep return NULL\n");
2863 		dev->net->stats.tx_errors++;
2864 		dev->net->stats.tx_dropped++;
2865 	}
2866 
2867 	tasklet_schedule(&dev->bh);
2868 
2869 	return NETDEV_TX_OK;
2870 }
2871 
2872 static int
2873 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2874 {
2875 	int tmp;
2876 	struct usb_host_interface *alt = NULL;
2877 	struct usb_host_endpoint *in = NULL, *out = NULL;
2878 	struct usb_host_endpoint *status = NULL;
2879 
2880 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2881 		unsigned ep;
2882 
2883 		in = NULL;
2884 		out = NULL;
2885 		status = NULL;
2886 		alt = intf->altsetting + tmp;
2887 
2888 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2889 			struct usb_host_endpoint *e;
2890 			int intr = 0;
2891 
2892 			e = alt->endpoint + ep;
2893 			switch (e->desc.bmAttributes) {
2894 			case USB_ENDPOINT_XFER_INT:
2895 				if (!usb_endpoint_dir_in(&e->desc))
2896 					continue;
2897 				intr = 1;
2898 				/* FALLTHROUGH */
2899 			case USB_ENDPOINT_XFER_BULK:
2900 				break;
2901 			default:
2902 				continue;
2903 			}
2904 			if (usb_endpoint_dir_in(&e->desc)) {
2905 				if (!intr && !in)
2906 					in = e;
2907 				else if (intr && !status)
2908 					status = e;
2909 			} else {
2910 				if (!out)
2911 					out = e;
2912 			}
2913 		}
2914 		if (in && out)
2915 			break;
2916 	}
2917 	if (!alt || !in || !out)
2918 		return -EINVAL;
2919 
2920 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2921 				       in->desc.bEndpointAddress &
2922 				       USB_ENDPOINT_NUMBER_MASK);
2923 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2924 					out->desc.bEndpointAddress &
2925 					USB_ENDPOINT_NUMBER_MASK);
2926 	dev->ep_intr = status;
2927 
2928 	return 0;
2929 }
2930 
2931 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2932 {
2933 	struct lan78xx_priv *pdata = NULL;
2934 	int ret;
2935 	int i;
2936 
2937 	ret = lan78xx_get_endpoints(dev, intf);
2938 	if (ret) {
2939 		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2940 			    ret);
2941 		return ret;
2942 	}
2943 
2944 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2945 
2946 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2947 	if (!pdata) {
2948 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2949 		return -ENOMEM;
2950 	}
2951 
2952 	pdata->dev = dev;
2953 
2954 	spin_lock_init(&pdata->rfe_ctl_lock);
2955 	mutex_init(&pdata->dataport_mutex);
2956 
2957 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2958 
2959 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2960 		pdata->vlan_table[i] = 0;
2961 
2962 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2963 
2964 	dev->net->features = 0;
2965 
2966 	if (DEFAULT_TX_CSUM_ENABLE)
2967 		dev->net->features |= NETIF_F_HW_CSUM;
2968 
2969 	if (DEFAULT_RX_CSUM_ENABLE)
2970 		dev->net->features |= NETIF_F_RXCSUM;
2971 
2972 	if (DEFAULT_TSO_CSUM_ENABLE)
2973 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2974 
2975 	if (DEFAULT_VLAN_RX_OFFLOAD)
2976 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2977 
2978 	if (DEFAULT_VLAN_FILTER_ENABLE)
2979 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2980 
2981 	dev->net->hw_features = dev->net->features;
2982 
2983 	ret = lan78xx_setup_irq_domain(dev);
2984 	if (ret < 0) {
2985 		netdev_warn(dev->net,
2986 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2987 		goto out1;
2988 	}
2989 
2990 	dev->net->hard_header_len += TX_OVERHEAD;
2991 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2992 
2993 	/* Init all registers */
2994 	ret = lan78xx_reset(dev);
2995 	if (ret) {
2996 		netdev_warn(dev->net, "Registers INIT FAILED....");
2997 		goto out2;
2998 	}
2999 
3000 	ret = lan78xx_mdio_init(dev);
3001 	if (ret) {
3002 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3003 		goto out2;
3004 	}
3005 
3006 	dev->net->flags |= IFF_MULTICAST;
3007 
3008 	pdata->wol = WAKE_MAGIC;
3009 
3010 	return ret;
3011 
3012 out2:
3013 	lan78xx_remove_irq_domain(dev);
3014 
3015 out1:
3016 	netdev_warn(dev->net, "Bind routine FAILED");
3017 	cancel_work_sync(&pdata->set_multicast);
3018 	cancel_work_sync(&pdata->set_vlan);
3019 	kfree(pdata);
3020 	return ret;
3021 }
3022 
3023 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3024 {
3025 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3026 
3027 	lan78xx_remove_irq_domain(dev);
3028 
3029 	lan78xx_remove_mdio(dev);
3030 
3031 	if (pdata) {
3032 		cancel_work_sync(&pdata->set_multicast);
3033 		cancel_work_sync(&pdata->set_vlan);
3034 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3035 		kfree(pdata);
3036 		pdata = NULL;
3037 		dev->data[0] = 0;
3038 	}
3039 }
3040 
3041 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3042 				    struct sk_buff *skb,
3043 				    u32 rx_cmd_a, u32 rx_cmd_b)
3044 {
3045 	/* HW Checksum offload appears to be flawed if used when not stripping
3046 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3047 	 */
3048 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3049 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3050 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3051 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3052 		skb->ip_summed = CHECKSUM_NONE;
3053 	} else {
3054 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3055 		skb->ip_summed = CHECKSUM_COMPLETE;
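		/* CHECKSUM_COMPLETE tells the stack that skb->csum already
		 * holds the hardware-computed checksum over the packet
		 */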
3056 	}
3057 }
3058 
3059 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3060 				    struct sk_buff *skb,
3061 				    u32 rx_cmd_a, u32 rx_cmd_b)
3062 {
3063 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3064 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3065 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3066 				       (rx_cmd_b & 0xffff));
3067 }
3068 
3069 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3070 {
3071 	int status;
3072 
3073 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3074 		skb_queue_tail(&dev->rxq_pause, skb);
3075 		return;
3076 	}
3077 
3078 	dev->net->stats.rx_packets++;
3079 	dev->net->stats.rx_bytes += skb->len;
3080 
3081 	skb->protocol = eth_type_trans(skb, dev->net);
3082 
3083 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3084 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3085 	memset(skb->cb, 0, sizeof(struct skb_data));
3086 
3087 	if (skb_defer_rx_timestamp(skb))
3088 		return;
3089 
3090 	status = netif_rx(skb);
3091 	if (status != NET_RX_SUCCESS)
3092 		netif_dbg(dev, rx_err, dev->net,
3093 			  "netif_rx status %d\n", status);
3094 }
3095 
3096 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3097 {
3098 	if (skb->len < dev->net->hard_header_len)
3099 		return 0;
3100 
3101 	while (skb->len > 0) {
3102 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3103 		u16 rx_cmd_c;
3104 		struct sk_buff *skb2;
3105 		unsigned char *packet;
3106 
3107 		rx_cmd_a = get_unaligned_le32(skb->data);
3108 		skb_pull(skb, sizeof(rx_cmd_a));
3109 
3110 		rx_cmd_b = get_unaligned_le32(skb->data);
3111 		skb_pull(skb, sizeof(rx_cmd_b));
3112 
3113 		rx_cmd_c = get_unaligned_le16(skb->data);
3114 		skb_pull(skb, sizeof(rx_cmd_c));
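		/* each frame in the batch is preceded by 10 bytes of
		 * metadata: two little-endian 32-bit command words and one
		 * 16-bit word, consumed by the skb_pull()s above
		 */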
3115 
3116 		packet = skb->data;
3117 
3118 		/* get the packet length */
3119 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3120 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
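		/* frames are 32-bit aligned, e.g. size 60 with RXW_PADDING 2
		 * gives align_count = (4 - (62 % 4)) % 4 = 2 pad bytes
		 */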
3121 
3122 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3123 			netif_dbg(dev, rx_err, dev->net,
3124 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3125 		} else {
3126 			/* last frame in this batch */
3127 			if (skb->len == size) {
3128 				lan78xx_rx_csum_offload(dev, skb,
3129 							rx_cmd_a, rx_cmd_b);
3130 				lan78xx_rx_vlan_offload(dev, skb,
3131 							rx_cmd_a, rx_cmd_b);
3132 
3133 				skb_trim(skb, skb->len - 4); /* remove fcs */
3134 				skb->truesize = size + sizeof(struct sk_buff);
3135 
3136 				return 1;
3137 			}
3138 
3139 			skb2 = skb_clone(skb, GFP_ATOMIC);
3140 			if (unlikely(!skb2)) {
3141 				netdev_warn(dev->net, "Error allocating skb");
3142 				return 0;
3143 			}
3144 
3145 			skb2->len = size;
3146 			skb2->data = packet;
3147 			skb_set_tail_pointer(skb2, size);
3148 
3149 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3150 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3151 
3152 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3153 			skb2->truesize = size + sizeof(struct sk_buff);
3154 
3155 			lan78xx_skb_return(dev, skb2);
3156 		}
3157 
3158 		skb_pull(skb, size);
3159 
3160 		/* padding bytes before the next frame starts */
3161 		if (skb->len)
3162 			skb_pull(skb, align_count);
3163 	}
3164 
3165 	return 1;
3166 }
3167 
3168 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3169 {
3170 	if (!lan78xx_rx(dev, skb)) {
3171 		dev->net->stats.rx_errors++;
3172 		goto done;
3173 	}
3174 
3175 	if (skb->len) {
3176 		lan78xx_skb_return(dev, skb);
3177 		return;
3178 	}
3179 
3180 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3181 	dev->net->stats.rx_errors++;
3182 done:
3183 	skb_queue_tail(&dev->done, skb);
3184 }
3185 
3186 static void rx_complete(struct urb *urb);
3187 
3188 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3189 {
3190 	struct sk_buff *skb;
3191 	struct skb_data *entry;
3192 	unsigned long lockflags;
3193 	size_t size = dev->rx_urb_size;
3194 	int ret = 0;
3195 
3196 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3197 	if (!skb) {
3198 		usb_free_urb(urb);
3199 		return -ENOMEM;
3200 	}
3201 
3202 	entry = (struct skb_data *)skb->cb;
3203 	entry->urb = urb;
3204 	entry->dev = dev;
3205 	entry->length = 0;
3206 
3207 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3208 			  skb->data, size, rx_complete, skb);
3209 
3210 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3211 
3212 	if (netif_device_present(dev->net) &&
3213 	    netif_running(dev->net) &&
3214 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3215 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3216 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3217 		switch (ret) {
3218 		case 0:
3219 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3220 			break;
3221 		case -EPIPE:
3222 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3223 			break;
3224 		case -ENODEV:
3225 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3226 			netif_device_detach(dev->net);
3227 			break;
3228 		case -EHOSTUNREACH:
3229 			ret = -ENOLINK;
3230 			break;
3231 		default:
3232 			netif_dbg(dev, rx_err, dev->net,
3233 				  "rx submit, %d\n", ret);
3234 			tasklet_schedule(&dev->bh);
3235 		}
3236 	} else {
3237 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3238 		ret = -ENOLINK;
3239 	}
3240 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3241 	if (ret) {
3242 		dev_kfree_skb_any(skb);
3243 		usb_free_urb(urb);
3244 	}
3245 	return ret;
3246 }
3247 
3248 static void rx_complete(struct urb *urb)
3249 {
3250 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3251 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3252 	struct lan78xx_net *dev = entry->dev;
3253 	int urb_status = urb->status;
3254 	enum skb_state state;
3255 
3256 	skb_put(skb, urb->actual_length);
3257 	state = rx_done;
3258 	entry->urb = NULL;
3259 
3260 	switch (urb_status) {
3261 	case 0:
3262 		if (skb->len < dev->net->hard_header_len) {
3263 			state = rx_cleanup;
3264 			dev->net->stats.rx_errors++;
3265 			dev->net->stats.rx_length_errors++;
3266 			netif_dbg(dev, rx_err, dev->net,
3267 				  "rx length %d\n", skb->len);
3268 		}
3269 		usb_mark_last_busy(dev->udev);
3270 		break;
3271 	case -EPIPE:
3272 		dev->net->stats.rx_errors++;
3273 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3274 		/* FALLTHROUGH */
3275 	case -ECONNRESET:				/* async unlink */
3276 	case -ESHUTDOWN:				/* hardware gone */
3277 		netif_dbg(dev, ifdown, dev->net,
3278 			  "rx shutdown, code %d\n", urb_status);
3279 		state = rx_cleanup;
3280 		entry->urb = urb;
3281 		urb = NULL;
3282 		break;
3283 	case -EPROTO:
3284 	case -ETIME:
3285 	case -EILSEQ:
3286 		dev->net->stats.rx_errors++;
3287 		state = rx_cleanup;
3288 		entry->urb = urb;
3289 		urb = NULL;
3290 		break;
3291 
3292 	/* data overrun ... flush fifo? */
3293 	case -EOVERFLOW:
3294 		dev->net->stats.rx_over_errors++;
3295 		/* FALLTHROUGH */
3296 
3297 	default:
3298 		state = rx_cleanup;
3299 		dev->net->stats.rx_errors++;
3300 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3301 		break;
3302 	}
3303 
3304 	state = defer_bh(dev, skb, &dev->rxq, state);
3305 
3306 	if (urb) {
3307 		if (netif_running(dev->net) &&
3308 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3309 		    state != unlink_start) {
3310 			rx_submit(dev, urb, GFP_ATOMIC);
3311 			return;
3312 		}
3313 		usb_free_urb(urb);
3314 	}
3315 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3316 }
3317 
3318 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3319 {
3320 	int length;
3321 	struct urb *urb = NULL;
3322 	struct skb_data *entry;
3323 	unsigned long flags;
3324 	struct sk_buff_head *tqp = &dev->txq_pend;
3325 	struct sk_buff *skb, *skb2;
3326 	int ret;
3327 	int count, pos;
3328 	int skb_totallen, pkt_cnt;
3329 
3330 	skb_totallen = 0;
3331 	pkt_cnt = 0;
3332 	count = 0;
3333 	length = 0;
3334 	spin_lock_irqsave(&tqp->lock, flags);
3335 	skb_queue_walk(tqp, skb) {
3336 		if (skb_is_gso(skb)) {
3337 			if (!skb_queue_is_first(tqp, skb)) {
3338 				/* handle previous packets first */
3339 				break;
3340 			}
3341 			count = 1;
3342 			length = skb->len - TX_OVERHEAD;
3343 			__skb_unlink(skb, tqp);
3344 			spin_unlock_irqrestore(&tqp->lock, flags);
3345 			goto gso_skb;
3346 		}
3347 
3348 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3349 			break;
3350 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3351 		pkt_cnt++;
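		/* e.g. two 60-byte packets: skb_totallen goes 60, then
		 * 60 + roundup(60, 4) = 120; each packet lands at a 32-bit
		 * aligned offset, matching the roundup of pos below
		 */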
3352 	}
3353 	spin_unlock_irqrestore(&tqp->lock, flags);
3354 
3355 	/* copy to a single skb */
3356 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3357 	if (!skb)
3358 		goto drop;
3359 
3360 	skb_put(skb, skb_totallen);
3361 
3362 	for (count = pos = 0; count < pkt_cnt; count++) {
3363 		skb2 = skb_dequeue(tqp);
3364 		if (skb2) {
3365 			length += (skb2->len - TX_OVERHEAD);
3366 			memcpy(skb->data + pos, skb2->data, skb2->len);
3367 			pos += roundup(skb2->len, sizeof(u32));
3368 			dev_kfree_skb(skb2);
3369 		}
3370 	}
3371 
3372 gso_skb:
3373 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3374 	if (!urb)
3375 		goto drop;
3376 
3377 	entry = (struct skb_data *)skb->cb;
3378 	entry->urb = urb;
3379 	entry->dev = dev;
3380 	entry->length = length;
3381 	entry->num_of_packet = count;
3382 
3383 	spin_lock_irqsave(&dev->txq.lock, flags);
3384 	ret = usb_autopm_get_interface_async(dev->intf);
3385 	if (ret < 0) {
3386 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3387 		goto drop;
3388 	}
3389 
3390 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3391 			  skb->data, skb->len, tx_complete, skb);
3392 
3393 	if (length % dev->maxpacket == 0) {
3394 		/* send USB_ZERO_PACKET */
3395 		urb->transfer_flags |= URB_ZERO_PACKET;
3396 	}
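	/* without URB_ZERO_PACKET a transfer that is an exact multiple of
	 * the bulk max-packet size would never end in a short packet, so
	 * the device could not detect the end of the transfer
	 */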
3397 
3398 #ifdef CONFIG_PM
3399 	/* if this triggers, the device is still asleep */
3400 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3401 		/* transmission will be done in resume */
3402 		usb_anchor_urb(urb, &dev->deferred);
3403 		/* no point in processing more packets */
3404 		netif_stop_queue(dev->net);
3405 		usb_put_urb(urb);
3406 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3407 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3408 		return;
3409 	}
3410 #endif
3411 
3412 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3413 	switch (ret) {
3414 	case 0:
3415 		netif_trans_update(dev->net);
3416 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3417 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3418 			netif_stop_queue(dev->net);
3419 		break;
3420 	case -EPIPE:
3421 		netif_stop_queue(dev->net);
3422 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3423 		usb_autopm_put_interface_async(dev->intf);
3424 		break;
3425 	default:
3426 		usb_autopm_put_interface_async(dev->intf);
3427 		netif_dbg(dev, tx_err, dev->net,
3428 			  "tx: submit urb err %d\n", ret);
3429 		break;
3430 	}
3431 
3432 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3433 
3434 	if (ret) {
3435 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3436 drop:
3437 		dev->net->stats.tx_dropped++;
3438 		if (skb)
3439 			dev_kfree_skb_any(skb);
3440 		usb_free_urb(urb);
3441 	} else
3442 		netif_dbg(dev, tx_queued, dev->net,
3443 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3444 }
3445 
3446 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3447 {
3448 	struct urb *urb;
3449 	int i;
3450 
3451 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3452 		for (i = 0; i < 10; i++) {
3453 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3454 				break;
3455 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3456 			if (urb)
3457 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3458 					return;
3459 		}
3460 
3461 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3462 			tasklet_schedule(&dev->bh);
3463 	}
3464 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3465 		netif_wake_queue(dev->net);
3466 }
3467 
3468 static void lan78xx_bh(unsigned long param)
3469 {
3470 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3471 	struct sk_buff *skb;
3472 	struct skb_data *entry;
3473 
3474 	while ((skb = skb_dequeue(&dev->done))) {
3475 		entry = (struct skb_data *)(skb->cb);
3476 		switch (entry->state) {
3477 		case rx_done:
3478 			entry->state = rx_cleanup;
3479 			rx_process(dev, skb);
3480 			continue;
3481 		case tx_done:
3482 			usb_free_urb(entry->urb);
3483 			dev_kfree_skb(skb);
3484 			continue;
3485 		case rx_cleanup:
3486 			usb_free_urb(entry->urb);
3487 			dev_kfree_skb(skb);
3488 			continue;
3489 		default:
3490 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3491 			return;
3492 		}
3493 	}
3494 
3495 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3496 		/* reset update timer delta */
3497 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3498 			dev->delta = 1;
3499 			mod_timer(&dev->stat_monitor,
3500 				  jiffies + STAT_UPDATE_TIMER);
3501 		}
3502 
3503 		if (!skb_queue_empty(&dev->txq_pend))
3504 			lan78xx_tx_bh(dev);
3505 
3506 		if (!timer_pending(&dev->delay) &&
3507 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3508 			lan78xx_rx_bh(dev);
3509 	}
3510 }
3511 
3512 static void lan78xx_delayedwork(struct work_struct *work)
3513 {
3514 	int status;
3515 	struct lan78xx_net *dev;
3516 
3517 	dev = container_of(work, struct lan78xx_net, wq.work);
3518 
3519 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3520 		unlink_urbs(dev, &dev->txq);
3521 		status = usb_autopm_get_interface(dev->intf);
3522 		if (status < 0)
3523 			goto fail_pipe;
3524 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3525 		usb_autopm_put_interface(dev->intf);
3526 		if (status < 0 &&
3527 		    status != -EPIPE &&
3528 		    status != -ESHUTDOWN) {
3529 			if (netif_msg_tx_err(dev))
3530 fail_pipe:
3531 				netdev_err(dev->net,
3532 					   "can't clear tx halt, status %d\n",
3533 					   status);
3534 		} else {
3535 			clear_bit(EVENT_TX_HALT, &dev->flags);
3536 			if (status != -ESHUTDOWN)
3537 				netif_wake_queue(dev->net);
3538 		}
3539 	}
3540 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3541 		unlink_urbs(dev, &dev->rxq);
3542 		status = usb_autopm_get_interface(dev->intf);
3543 		if (status < 0)
3544 			goto fail_halt;
3545 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3546 		usb_autopm_put_interface(dev->intf);
3547 		if (status < 0 &&
3548 		    status != -EPIPE &&
3549 		    status != -ESHUTDOWN) {
3550 			if (netif_msg_rx_err(dev))
3551 fail_halt:
3552 				netdev_err(dev->net,
3553 					   "can't clear rx halt, status %d\n",
3554 					   status);
3555 		} else {
3556 			clear_bit(EVENT_RX_HALT, &dev->flags);
3557 			tasklet_schedule(&dev->bh);
3558 		}
3559 	}
3560 
3561 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3562 		int ret = 0;
3563 
3564 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3565 		status = usb_autopm_get_interface(dev->intf);
3566 		if (status < 0)
3567 			goto skip_reset;
3568 		if (lan78xx_link_reset(dev) < 0) {
3569 			usb_autopm_put_interface(dev->intf);
3570 skip_reset:
3571 			netdev_info(dev->net, "link reset failed (%d)\n",
3572 				    ret);
3573 		} else {
3574 			usb_autopm_put_interface(dev->intf);
3575 		}
3576 	}
3577 
3578 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3579 		lan78xx_update_stats(dev);
3580 
3581 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3582 
3583 		mod_timer(&dev->stat_monitor,
3584 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3585 
3586 		dev->delta = min((dev->delta * 2), 50);
3587 	}
3588 }
3589 
3590 static void intr_complete(struct urb *urb)
3591 {
3592 	struct lan78xx_net *dev = urb->context;
3593 	int status = urb->status;
3594 
3595 	switch (status) {
3596 	/* success */
3597 	case 0:
3598 		lan78xx_status(dev, urb);
3599 		break;
3600 
3601 	/* software-driven interface shutdown */
3602 	case -ENOENT:			/* urb killed */
3603 	case -ESHUTDOWN:		/* hardware gone */
3604 		netif_dbg(dev, ifdown, dev->net,
3605 			  "intr shutdown, code %d\n", status);
3606 		return;
3607 
3608 	/* NOTE:  not throttling like RX/TX, since this endpoint
3609 	 * already polls infrequently
3610 	 */
3611 	default:
3612 		netdev_dbg(dev->net, "intr status %d\n", status);
3613 		break;
3614 	}
3615 
3616 	if (!netif_running(dev->net))
3617 		return;
3618 
3619 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3620 	status = usb_submit_urb(urb, GFP_ATOMIC);
3621 	if (status != 0)
3622 		netif_err(dev, timer, dev->net,
3623 			  "intr resubmit --> %d\n", status);
3624 }
3625 
3626 static void lan78xx_disconnect(struct usb_interface *intf)
3627 {
3628 	struct lan78xx_net *dev;
3629 	struct usb_device *udev;
3630 	struct net_device *net;
3631 	struct phy_device *phydev;
3632 
3633 	dev = usb_get_intfdata(intf);
3634 	usb_set_intfdata(intf, NULL);
3635 	if (!dev)
3636 		return;
3637 
3638 	udev = interface_to_usbdev(intf);
3639 	net = dev->net;
3640 	phydev = net->phydev;
3641 
3642 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3643 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3644 
3645 	phy_disconnect(net->phydev);
3646 
3647 	if (phy_is_pseudo_fixed_link(phydev))
3648 		fixed_phy_unregister(phydev);
3649 
3650 	unregister_netdev(net);
3651 
3652 	cancel_delayed_work_sync(&dev->wq);
3653 
3654 	usb_scuttle_anchored_urbs(&dev->deferred);
3655 
3656 	lan78xx_unbind(dev, intf);
3657 
3658 	usb_kill_urb(dev->urb_intr);
3659 	usb_free_urb(dev->urb_intr);
3660 
3661 	free_netdev(net);
3662 	usb_put_dev(udev);
3663 }
3664 
3665 static void lan78xx_tx_timeout(struct net_device *net)
3666 {
3667 	struct lan78xx_net *dev = netdev_priv(net);
3668 
3669 	unlink_urbs(dev, &dev->txq);
3670 	tasklet_schedule(&dev->bh);
3671 }
3672 
3673 static const struct net_device_ops lan78xx_netdev_ops = {
3674 	.ndo_open		= lan78xx_open,
3675 	.ndo_stop		= lan78xx_stop,
3676 	.ndo_start_xmit		= lan78xx_start_xmit,
3677 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3678 	.ndo_change_mtu		= lan78xx_change_mtu,
3679 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3680 	.ndo_validate_addr	= eth_validate_addr,
3681 	.ndo_do_ioctl		= lan78xx_ioctl,
3682 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3683 	.ndo_set_features	= lan78xx_set_features,
3684 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3685 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3686 };
3687 
3688 static void lan78xx_stat_monitor(struct timer_list *t)
3689 {
3690 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3691 
3692 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3693 }
3694 
3695 static int lan78xx_probe(struct usb_interface *intf,
3696 			 const struct usb_device_id *id)
3697 {
3698 	struct lan78xx_net *dev;
3699 	struct net_device *netdev;
3700 	struct usb_device *udev;
3701 	int ret;
3702 	unsigned maxp;
3703 	unsigned period;
3704 	u8 *buf = NULL;
3705 
3706 	udev = interface_to_usbdev(intf);
3707 	udev = usb_get_dev(udev);
3708 
3709 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3710 	if (!netdev) {
3711 		dev_err(&intf->dev, "Error: OOM\n");
3712 		ret = -ENOMEM;
3713 		goto out1;
3714 	}
3715 
3716 	/* netdev_printk() needs this */
3717 	SET_NETDEV_DEV(netdev, &intf->dev);
3718 
3719 	dev = netdev_priv(netdev);
3720 	dev->udev = udev;
3721 	dev->intf = intf;
3722 	dev->net = netdev;
3723 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3724 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3725 
3726 	skb_queue_head_init(&dev->rxq);
3727 	skb_queue_head_init(&dev->txq);
3728 	skb_queue_head_init(&dev->done);
3729 	skb_queue_head_init(&dev->rxq_pause);
3730 	skb_queue_head_init(&dev->txq_pend);
3731 	mutex_init(&dev->phy_mutex);
3732 
3733 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3734 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3735 	init_usb_anchor(&dev->deferred);
3736 
3737 	netdev->netdev_ops = &lan78xx_netdev_ops;
3738 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3739 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3740 
3741 	dev->delta = 1;
3742 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3743 
3744 	mutex_init(&dev->stats.access_lock);
3745 
3746 	ret = lan78xx_bind(dev, intf);
3747 	if (ret < 0)
3748 		goto out2;
3749 
3750 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3751 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3752 
3753 	/* MTU range: 68 - 9000 */
3754 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3755 
3756 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3757 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3758 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3759 
3760 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3761 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3762 
3763 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3764 					dev->ep_intr->desc.bEndpointAddress &
3765 					USB_ENDPOINT_NUMBER_MASK);
3766 	period = dev->ep_intr->desc.bInterval;
3767 
3768 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3769 	buf = kmalloc(maxp, GFP_KERNEL);
3770 	if (buf) {
3771 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3772 		if (!dev->urb_intr) {
3773 			ret = -ENOMEM;
3774 			kfree(buf);
3775 			goto out3;
3776 		} else {
3777 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3778 					 dev->pipe_intr, buf, maxp,
3779 					 intr_complete, dev, period);
3780 		}
3781 	}
3782 
3783 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3784 
3785 	/* driver requires remote-wakeup capability during autosuspend. */
3786 	intf->needs_remote_wakeup = 1;
3787 
3788 	ret = lan78xx_phy_init(dev);
3789 	if (ret < 0)
3790 		goto out4;
3791 
3792 	ret = register_netdev(netdev);
3793 	if (ret != 0) {
3794 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3795 		goto out5;
3796 	}
3797 
3798 	usb_set_intfdata(intf, dev);
3799 
3800 	ret = device_set_wakeup_enable(&udev->dev, true);
3801 
3802 	/* The default autosuspend delay of 2 sec has more overhead than
3803 	 * benefit; set it to 10 sec instead.
3804 	 */
3805 	pm_runtime_set_autosuspend_delay(&udev->dev,
3806 					 DEFAULT_AUTOSUSPEND_DELAY);
3807 
3808 	return 0;
3809 
3810 out5:
3811 	phy_disconnect(netdev->phydev);
3812 out4:
3813 	usb_free_urb(dev->urb_intr);
3814 out3:
3815 	lan78xx_unbind(dev, intf);
3816 out2:
3817 	free_netdev(netdev);
3818 out1:
3819 	usb_put_dev(udev);
3820 
3821 	return ret;
3822 }
3823 
3824 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3825 {
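	/* bitwise CRC-16 over the wake frame pattern: seed 0xFFFF,
	 * polynomial 0x8005, consuming each data byte LSB first
	 */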
3826 	const u16 crc16poly = 0x8005;
3827 	int i;
3828 	u16 bit, crc, msb;
3829 	u8 data;
3830 
3831 	crc = 0xFFFF;
3832 	for (i = 0; i < len; i++) {
3833 		data = *buf++;
3834 		for (bit = 0; bit < 8; bit++) {
3835 			msb = crc >> 15;
3836 			crc <<= 1;
3837 
3838 			if (msb ^ (u16)(data & 1)) {
3839 				crc ^= crc16poly;
3840 				crc |= (u16)0x0001U;
3841 			}
3842 			data >>= 1;
3843 		}
3844 	}
3845 
3846 	return crc;
3847 }
3848 
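/* Translate the requested WAKE_* flags into wakeup-frame filters
 * (WUF_CFGx/WUF_MASKx), wake-enable bits in WUCSR and a suspend mode in
 * PMT_CTL, then re-enable the receiver so it can observe the wake
 * packets while the device is suspended.
 */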
3849 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3850 {
3851 	u32 buf;
3852 	int ret;
3853 	int mask_index;
3854 	u16 crc;
3855 	u32 temp_wucsr;
3856 	u32 temp_pmt_ctl;
3857 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
3859 	const u8 arp_type[2] = { 0x08, 0x06 };
3860 
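	/* stop the transmitter and receiver while the wake-up logic is
	 * reconfigured
	 */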
3861 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3862 	buf &= ~MAC_TX_TXEN_;
3863 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3864 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3865 	buf &= ~MAC_RX_RXEN_;
3866 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3867 
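	/* discard the previous wake configuration and clear the latched
	 * wake sources (WK_SRC is assumed to be write-one-to-clear)
	 */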
3868 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3869 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3870 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3871 
3872 	temp_wucsr = 0;
3873 
3874 	temp_pmt_ctl = 0;
3875 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3876 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3877 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3878 
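	/* disable every wakeup-frame filter before selectively enabling
	 * the ones requested below
	 */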
3879 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3880 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3881 
3882 	mask_index = 0;
3883 	if (wol & WAKE_PHY) {
3884 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3885 
3886 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3887 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3888 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3889 	}
3890 	if (wol & WAKE_MAGIC) {
3891 		temp_wucsr |= WUCSR_MPEN_;
3892 
3893 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3894 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3895 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3896 	}
3897 	if (wol & WAKE_BCAST) {
3898 		temp_wucsr |= WUCSR_BCST_EN_;
3899 
3900 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3901 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3902 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3903 	}
3904 	if (wol & WAKE_MCAST) {
3905 		temp_wucsr |= WUCSR_WAKE_EN_;
3906 
3907 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3908 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3909 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3910 					WUF_CFGX_EN_ |
3911 					WUF_CFGX_TYPE_MCAST_ |
3912 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3913 					(crc & WUF_CFGX_CRC16_MASK_));
3914 
3915 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3916 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3917 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3918 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3919 		mask_index++;
3920 
3921 		/* for IPv6 Multicast */
3922 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3923 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3924 					WUF_CFGX_EN_ |
3925 					WUF_CFGX_TYPE_MCAST_ |
3926 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3927 					(crc & WUF_CFGX_CRC16_MASK_));
3928 
3929 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3930 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3931 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3932 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3933 		mask_index++;
3934 
3935 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3936 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3937 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3938 	}
3939 	if (wol & WAKE_UCAST) {
3940 		temp_wucsr |= WUCSR_PFDA_EN_;
3941 
3942 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3943 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3944 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3945 	}
3946 	if (wol & WAKE_ARP) {
3947 		temp_wucsr |= WUCSR_WAKE_EN_;
3948 
3949 		/* set WUF_CFG & WUF_MASK
3950 		 * for packettype (offset 12,13) = ARP (0x0806)
3951 		 */
3952 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3953 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3954 					WUF_CFGX_EN_ |
3955 					WUF_CFGX_TYPE_ALL_ |
3956 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3957 					(crc & WUF_CFGX_CRC16_MASK_));
3958 
3959 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3960 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3961 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3962 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3963 		mask_index++;
3964 
3965 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3966 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3967 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3968 	}
3969 
3970 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3971 
	/* when multiple WoL bits are set, force suspend mode 0 so that
	 * all of the requested sources are able to wake the device
	 */
3973 	if (hweight_long((unsigned long)wol) > 1) {
3974 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3975 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3976 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3977 	}
3978 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3979 
	/* clear WUPS (wakeup status) by writing ones to the status field */
3981 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3982 	buf |= PMT_CTL_WUPS_MASK_;
3983 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3984 
3985 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3986 	buf |= MAC_RX_RXEN_;
3987 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3988 
3989 	return 0;
3990 }
3991 
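/* On the first suspend of the interface this stops the MAC and tears
 * down the in-flight URBs.  For runtime (auto) suspend the device is
 * then armed to wake on any good received frame; for system suspend the
 * user-configured WoL sources are armed via lan78xx_set_suspend().
 */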
3992 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3993 {
3994 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3995 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3996 	u32 buf;
3997 	int ret;
4001 
4002 	if (!dev->suspend_count++) {
4003 		spin_lock_irq(&dev->txq.lock);
4004 		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
4008 			spin_unlock_irq(&dev->txq.lock);
4009 			ret = -EBUSY;
4010 			goto out;
4011 		} else {
4012 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4013 			spin_unlock_irq(&dev->txq.lock);
4014 		}
4015 
4016 		/* stop TX & RX */
4017 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4018 		buf &= ~MAC_TX_TXEN_;
4019 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
4020 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4021 		buf &= ~MAC_RX_RXEN_;
4022 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
4023 
		/* empty out the Rx and Tx queues */
4025 		netif_device_detach(dev->net);
4026 		lan78xx_terminate_urbs(dev);
4027 		usb_kill_urb(dev->urb_intr);
4028 
4029 		/* reattach */
4030 		netif_device_attach(dev->net);
4031 	}
4032 
4033 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4034 		del_timer(&dev->stat_monitor);
4035 
4036 		if (PMSG_IS_AUTO(message)) {
4037 			/* auto suspend (selective suspend) */
4038 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4039 			buf &= ~MAC_TX_TXEN_;
4040 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
4041 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4042 			buf &= ~MAC_RX_RXEN_;
4043 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4044 
4045 			ret = lan78xx_write_reg(dev, WUCSR, 0);
4046 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
4047 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4048 
			/* wake on any good frame that passes the
			 * receive filters and store the frame that
			 * triggered the wake
			 */
4050 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
4051 
4052 			buf |= WUCSR_RFE_WAKE_EN_;
4053 			buf |= WUCSR_STORE_WAKE_;
4054 
4055 			ret = lan78xx_write_reg(dev, WUCSR, buf);
4056 
4057 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4058 
4059 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4060 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
4061 
4062 			buf |= PMT_CTL_PHY_WAKE_EN_;
4063 			buf |= PMT_CTL_WOL_EN_;
4064 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4065 			buf |= PMT_CTL_SUS_MODE_3_;
4066 
4067 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4068 
4069 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4070 
4071 			buf |= PMT_CTL_WUPS_MASK_;
4072 
4073 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4074 
4075 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4076 			buf |= MAC_RX_RXEN_;
4077 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4078 		} else {
4079 			lan78xx_set_suspend(dev, pdata->wol);
4080 		}
4081 	}
4082 
4083 	ret = 0;
4084 out:
4085 	return ret;
4086 }
4087 
4088 static int lan78xx_resume(struct usb_interface *intf)
4089 {
4090 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4091 	struct sk_buff *skb;
4092 	struct urb *res;
4093 	int ret;
4094 	u32 buf;
4095 
4096 	if (!timer_pending(&dev->stat_monitor)) {
4097 		dev->delta = 1;
4098 		mod_timer(&dev->stat_monitor,
4099 			  jiffies + STAT_UPDATE_TIMER);
4100 	}
4101 
4102 	if (!--dev->suspend_count) {
4103 		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);
4106 
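		/* resubmit tx URBs that were deferred while asleep; on
		 * failure drop the skb and release the autosuspend
		 * reference taken when it was queued
		 */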
4107 		spin_lock_irq(&dev->txq.lock);
4108 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4109 			skb = (struct sk_buff *)res->context;
4110 			ret = usb_submit_urb(res, GFP_ATOMIC);
4111 			if (ret < 0) {
4112 				dev_kfree_skb_any(skb);
4113 				usb_free_urb(res);
4114 				usb_autopm_put_interface_async(dev->intf);
4115 			} else {
4116 				netif_trans_update(dev->net);
4117 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4118 			}
4119 		}
4120 
4121 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4122 		spin_unlock_irq(&dev->txq.lock);
4123 
4124 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4126 				netif_start_queue(dev->net);
4127 			tasklet_schedule(&dev->bh);
4128 		}
4129 	}
4130 
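	/* drop the wake configuration and acknowledge any wake events
	 * latched while suspended (status bits are assumed
	 * write-one-to-clear), then turn the transmitter back on
	 */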
4131 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4132 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4133 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4134 
4135 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4136 					     WUCSR2_ARP_RCD_ |
4137 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4138 					     WUCSR2_IPV4_TCPSYN_RCD_);
4139 
4140 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4141 					    WUCSR_EEE_RX_WAKE_ |
4142 					    WUCSR_PFDA_FR_ |
4143 					    WUCSR_RFE_WAKE_FR_ |
4144 					    WUCSR_WUFR_ |
4145 					    WUCSR_MPR_ |
4146 					    WUCSR_BCST_FR_);
4147 
4148 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4149 	buf |= MAC_TX_TXEN_;
4150 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4151 
4152 	return 0;
4153 }
4154 
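/* A reset-resume means the device lost its register state: run a full
 * chip reset and restart the PHY before the normal resume path.
 */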
4155 static int lan78xx_reset_resume(struct usb_interface *intf)
4156 {
4157 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4158 
4159 	lan78xx_reset(dev);
4160 
4161 	phy_start(dev->net->phydev);
4162 
4163 	return lan78xx_resume(intf);
4164 }
4165 
4166 static const struct usb_device_id products[] = {
4167 	{
4168 	/* LAN7800 USB Gigabit Ethernet Device */
4169 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4170 	},
4171 	{
4172 	/* LAN7850 USB Gigabit Ethernet Device */
4173 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4174 	},
4175 	{
4176 	/* LAN7801 USB Gigabit Ethernet Device */
4177 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4178 	},
4179 	{},
4180 };
4181 MODULE_DEVICE_TABLE(usb, products);
4182 
4183 static struct usb_driver lan78xx_driver = {
4184 	.name			= DRIVER_NAME,
4185 	.id_table		= products,
4186 	.probe			= lan78xx_probe,
4187 	.disconnect		= lan78xx_disconnect,
4188 	.suspend		= lan78xx_suspend,
4189 	.resume			= lan78xx_resume,
4190 	.reset_resume		= lan78xx_reset_resume,
4191 	.supports_autosuspend	= 1,
4192 	.disable_hub_initiated_lpm = 1,
4193 };
4194 
4195 module_usb_driver(lan78xx_driver);
4196 
4197 MODULE_AUTHOR(DRIVER_AUTHOR);
4198 MODULE_DESCRIPTION(DRIVER_DESC);
4199 MODULE_LICENSE("GPL");