xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision b39b46fb)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

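/* Read a 32-bit device register over the USB control pipe. The transfer
 * buffer is kmalloc'd rather than taken from the stack so it is safe for
 * USB/DMA use; the little-endian value is converted to CPU byte order.
 */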
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

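/* Fetch the whole hardware statistics block in one vendor control read,
 * then convert each little-endian counter to CPU byte order.
 */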
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stats. ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

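/* The hardware statistics counters are 32 bits wide and wrap. A counter
 * reading back smaller than the last saved snapshot means it rolled over,
 * so bump the per-counter rollover count; lan78xx_update_stats() folds
 * these into the 64-bit running totals.
 */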
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

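/* Rebuild the 64-bit totals from the latest 32-bit hardware snapshot:
 * curr_stat[i] = hw[i] + rollover_count[i] * (rollover_max[i] + 1)
 */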
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read completes, with a timeout. Called with phy_mutex held. */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

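/* Build an MII_ACC command word from the PHY address, register index and
 * transfer direction; the BUSY bit kicks off the MII transaction.
 */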
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

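/* Read EEPROM bytes one at a time through E2P_CMD/E2P_DATA, waiting for
 * the controller to finish each byte before advancing the offset.
 */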
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait for it to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

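/* The OTP may hold two images: indicator byte OTP_INDICATOR_1 selects the
 * image at offset 0, OTP_INDICATOR_2 the second image at offset 0x100.
 */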
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

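/* Write a block of words into one of the internal RAMs selected via
 * DP_SEL, one word per DP_ADDR/DP_DATA/DP_CMD cycle, waiting for DPRDY
 * between writes.
 */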
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

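/* Rebuild the receive filters from the interface state: the first 32
 * multicast addresses go into perfect filter slots, any remainder into
 * the 512-bit multicast hash. The register writes themselves are deferred
 * to the sleepable set_multicast work item.
 */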
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

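/* Resolve pause settings from autonegotiation results (or from the
 * user-requested mode) and program the FCT_FLOW thresholds before
 * enabling flow control in the FLOW register.
 */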
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* drop the PM reference taken above before bailing out */
	if (wol->wolopts & ~WAKE_ALL) {
		usb_autopm_put_interface(dev->intf);
		return -EINVAL;
	}

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same usec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

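/* Choose the MAC address in priority order: the address already latched
 * in RX_ADDRL/RX_ADDRH, then platform/Device Tree data, then EEPROM/OTP,
 * falling back to a random address. Mirror it into perfect filter slot 0.
 */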
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan through PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, the chip may fail to set the mode correctly
	 * when the cable is switched between a long (~50+ m) and a short one.
	 * As a workaround, set the speed to 10 before setting it to 100 at
	 * forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear the pending interrupt generated during the workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* Do the register access here because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks executed in a
	 * non-atomic context.
	 */
1929 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1930 	if (buf != data->irqenable)
1931 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1932 
1933 	mutex_unlock(&data->irq_lock);
1934 }
1935 
1936 static struct irq_chip lan78xx_irqchip = {
1937 	.name			= "lan78xx-irqs",
1938 	.irq_mask		= lan78xx_irq_mask,
1939 	.irq_unmask		= lan78xx_irq_unmask,
1940 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1941 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1942 };
1943 
1944 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1945 {
1946 	struct device_node *of_node;
1947 	struct irq_domain *irqdomain;
1948 	unsigned int irqmap = 0;
1949 	u32 buf;
1950 	int ret = 0;
1951 
1952 	of_node = dev->udev->dev.parent->of_node;
1953 
1954 	mutex_init(&dev->domain_data.irq_lock);
1955 
1956 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1957 	dev->domain_data.irqenable = buf;
1958 
1959 	dev->domain_data.irqchip = &lan78xx_irqchip;
1960 	dev->domain_data.irq_handler = handle_simple_irq;
1961 
1962 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1963 					  &chip_domain_ops, &dev->domain_data);
1964 	if (irqdomain) {
1965 		/* create mapping for PHY interrupt */
1966 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1967 		if (!irqmap) {
1968 			irq_domain_remove(irqdomain);
1969 
1970 			irqdomain = NULL;
1971 			ret = -EINVAL;
1972 		}
1973 	} else {
1974 		ret = -EINVAL;
1975 	}
1976 
1977 	dev->domain_data.irqdomain = irqdomain;
1978 	dev->domain_data.phyirq = irqmap;
1979 
1980 	return ret;
1981 }
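
/* Illustrative sketch, not part of the driver: how a consumer could claim
 * the virq mapped above for INT_EP_PHY.  The hwirq number is simply the
 * bit position in INT_EP_CTL; mask/unmask only toggle that bit in
 * domain_data.irqenable, and the register itself is written later from
 * irq_bus_sync_unlock().  The example_* names below are hypothetical.
 */
#if 0
static irqreturn_t example_phy_isr(int irq, void *ctx)
{
	/* runs once the interrupt-EP URB data is demultiplexed */
	return IRQ_HANDLED;
}

static int example_claim_phy_irq(struct lan78xx_net *dev)
{
	/* handle_simple_irq plus a threaded handler, so the USB register
	 * writes happen in sleepable context
	 */
	return request_threaded_irq(dev->domain_data.phyirq, NULL,
				    example_phy_isr, IRQF_ONESHOT,
				    "lan78xx-phy-example", dev);
}
#endif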
1982 
1983 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1984 {
1985 	if (dev->domain_data.phyirq > 0) {
1986 		irq_dispose_mapping(dev->domain_data.phyirq);
1987 
1988 		if (dev->domain_data.irqdomain)
1989 			irq_domain_remove(dev->domain_data.irqdomain);
1990 	}
1991 	dev->domain_data.phyirq = 0;
1992 	dev->domain_data.irqdomain = NULL;
1993 }
1994 
1995 static int lan8835_fixup(struct phy_device *phydev)
1996 {
1997 	int buf;
1998 	int ret;
1999 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2000 
2001 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2002 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2003 	buf &= ~0x1800;
2004 	buf |= 0x0800;
2005 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2006 
2007 	/* RGMII MAC TXC Delay Enable */
2008 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2009 				MAC_RGMII_ID_TXC_DELAY_EN_);
2010 
2011 	/* RGMII TX DLL Tune Adjust */
2012 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2013 
2014 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2015 
2016 	return 1;
2017 }
2018 
2019 static int ksz9031rnx_fixup(struct phy_device *phydev)
2020 {
2021 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2022 
2023 	/* Micrel KSZ9031RNX PHY configuration */
2024 	/* RGMII Control Signal Pad Skew */
2025 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2026 	/* RGMII RX Data Pad Skew */
2027 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2028 	/* RGMII RX Clock Pad Skew */
2029 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2030 
2031 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2032 
2033 	return 1;
2034 }
2035 
2036 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2037 {
2038 	u32 buf;
2039 	int ret;
2040 	struct fixed_phy_status fphy_status = {
2041 		.link = 1,
2042 		.speed = SPEED_1000,
2043 		.duplex = DUPLEX_FULL,
2044 	};
2045 	struct phy_device *phydev;
2046 
2047 	phydev = phy_find_first(dev->mdiobus);
2048 	if (!phydev) {
2049 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2050 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2051 		if (IS_ERR(phydev)) {
2052 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2053 			return NULL;
2054 		}
2055 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2056 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2057 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2058 					MAC_RGMII_ID_TXC_DELAY_EN_);
2059 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2060 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2061 		buf |= HW_CFG_CLK125_EN_;
2062 		buf |= HW_CFG_REFCLK25_EN_;
2063 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2064 	} else {
2065 		if (!phydev->drv) {
2066 			netdev_err(dev->net, "no PHY driver found\n");
2067 			return NULL;
2068 		}
2069 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2070 		/* external PHY fixup for KSZ9031RNX */
2071 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2072 						 ksz9031rnx_fixup);
2073 		if (ret < 0) {
2074 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2075 			return NULL;
2076 		}
2077 		/* external PHY fixup for LAN8835 */
2078 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2079 						 lan8835_fixup);
2080 		if (ret < 0) {
2081 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2082 			return NULL;
2083 		}
2084 		/* add more external PHY fixup here if needed */
2085 
2086 		phydev->is_internal = false;
2087 	}
2088 	return phydev;
2089 }
2090 
2091 static int lan78xx_phy_init(struct lan78xx_net *dev)
2092 {
2093 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2094 	int ret;
2095 	u32 mii_adv;
2096 	struct phy_device *phydev;
2097 
2098 	switch (dev->chipid) {
2099 	case ID_REV_CHIP_ID_7801_:
2100 		phydev = lan7801_phy_init(dev);
2101 		if (!phydev) {
2102 			netdev_err(dev->net, "lan7801: PHY Init Failed\n");
2103 			return -EIO;
2104 		}
2105 		break;
2106 
2107 	case ID_REV_CHIP_ID_7800_:
2108 	case ID_REV_CHIP_ID_7850_:
2109 		phydev = phy_find_first(dev->mdiobus);
2110 		if (!phydev) {
2111 			netdev_err(dev->net, "no PHY found\n");
2112 			return -EIO;
2113 		}
2114 		phydev->is_internal = true;
2115 		dev->interface = PHY_INTERFACE_MODE_GMII;
2116 		break;
2117 
2118 	default:
2119 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2120 		return -EIO;
2121 	}
2122 
2123 	/* if phyirq is not set, use polling mode in phylib */
2124 	if (dev->domain_data.phyirq > 0)
2125 		phydev->irq = dev->domain_data.phyirq;
2126 	else
2127 		phydev->irq = 0;
2128 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2129 
2130 	/* set to AUTOMDIX */
2131 	phydev->mdix = ETH_TP_MDI_AUTO;
2132 
2133 	ret = phy_connect_direct(dev->net, phydev,
2134 				 lan78xx_link_status_change,
2135 				 dev->interface);
2136 	if (ret) {
2137 		netdev_err(dev->net, "can't attach PHY to %s\n",
2138 			   dev->mdiobus->id);
2139 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2140 			if (phy_is_pseudo_fixed_link(phydev)) {
2141 				fixed_phy_unregister(phydev);
2142 			} else {
2143 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2144 							     0xfffffff0);
2145 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2146 							     0xfffffff0);
2147 			}
2148 		}
2149 		return -EIO;
2150 	}
2151 
2152 	/* MAC doesn't support 1000T Half */
2153 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2154 
2155 	/* support both flow controls */
2156 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2157 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2158 			   phydev->advertising);
2159 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2160 			   phydev->advertising);
2161 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2162 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2163 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2164 
2165 	if (phydev->mdio.dev.of_node) {
2166 		u32 reg;
2167 		int len;
2168 
2169 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2170 						      "microchip,led-modes",
2171 						      sizeof(u32));
2172 		if (len >= 0) {
2173 			/* Ensure the appropriate LEDs are enabled */
2174 			lan78xx_read_reg(dev, HW_CFG, &reg);
2175 			reg &= ~(HW_CFG_LED0_EN_ |
2176 				 HW_CFG_LED1_EN_ |
2177 				 HW_CFG_LED2_EN_ |
2178 				 HW_CFG_LED3_EN_);
2179 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2180 				(len > 1) * HW_CFG_LED1_EN_ |
2181 				(len > 2) * HW_CFG_LED2_EN_ |
2182 				(len > 3) * HW_CFG_LED3_EN_;
2183 			lan78xx_write_reg(dev, HW_CFG, reg);
2184 		}
2185 	}
2186 
2187 	genphy_config_aneg(phydev);
2188 
2189 	dev->fc_autoneg = phydev->autoneg;
2190 
2191 	return 0;
2192 }
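
/* A minimal sketch (not driver code) of the LED-enable arithmetic above:
 * (len > n) evaluates to 0 or 1, so multiplying by the enable bit selects
 * it.  A two-entry "microchip,led-modes" property therefore enables LED0
 * and LED1 only.  example_led_bits() is a hypothetical name.
 */
#if 0
static u32 example_led_bits(int len)
{
	return (len > 0) * HW_CFG_LED0_EN_ |
	       (len > 1) * HW_CFG_LED1_EN_ |
	       (len > 2) * HW_CFG_LED2_EN_ |
	       (len > 3) * HW_CFG_LED3_EN_;	/* len == 2 -> LED0 | LED1 */
}
#endif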
2193 
2194 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2195 {
2196 	int ret = 0;
2197 	u32 buf;
2198 	bool rxenabled;
2199 
2200 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2201 
2202 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2203 
2204 	if (rxenabled) {
2205 		buf &= ~MAC_RX_RXEN_;
2206 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2207 	}
2208 
2209 	/* add 4 to size for FCS */
2210 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2211 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2212 
2213 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2214 
2215 	if (rxenabled) {
2216 		buf |= MAC_RX_RXEN_;
2217 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2218 	}
2219 
2220 	return 0;
2221 }
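
/* Worked example (a sketch, assuming the standard Ethernet sizes): the
 * caller passes mtu + VLAN_ETH_HLEN, and the function adds 4 for the FCS,
 * so an MTU of 1500 programs 1500 + 18 + 4 = 1522 into MAC_RX.
 * example_rx_max_size() is a hypothetical helper.
 */
#if 0
static u32 example_rx_max_size(int size)
{
	return (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
		MAC_RX_MAX_SIZE_MASK_);
}
#endif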
2222 
2223 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2224 {
2225 	struct sk_buff *skb;
2226 	unsigned long flags;
2227 	int count = 0;
2228 
2229 	spin_lock_irqsave(&q->lock, flags);
2230 	while (!skb_queue_empty(q)) {
2231 		struct skb_data	*entry;
2232 		struct urb *urb;
2233 		int ret;
2234 
2235 		skb_queue_walk(q, skb) {
2236 			entry = (struct skb_data *)skb->cb;
2237 			if (entry->state != unlink_start)
2238 				goto found;
2239 		}
2240 		break;
2241 found:
2242 		entry->state = unlink_start;
2243 		urb = entry->urb;
2244 
2245 		/* Take a reference on the URB so it cannot be freed while
2246 		 * usb_unlink_urb() runs; usb_unlink_urb() always races with
2247 		 * the .complete handler (including defer_bh), and without
2248 		 * the reference that race could trigger a use-after-free
2249 		 * inside usb_unlink_urb().
2250 		 */
2251 		usb_get_urb(urb);
2252 		spin_unlock_irqrestore(&q->lock, flags);
2253 		/* during some PM-driven resume scenarios,
2254 		 * these (async) unlinks complete immediately
2255 		 */
2256 		ret = usb_unlink_urb(urb);
2257 		if (ret != -EINPROGRESS && ret != 0)
2258 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2259 		else
2260 			count++;
2261 		usb_put_urb(urb);
2262 		spin_lock_irqsave(&q->lock, flags);
2263 	}
2264 	spin_unlock_irqrestore(&q->lock, flags);
2265 	return count;
2266 }
2267 
2268 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2269 {
2270 	struct lan78xx_net *dev = netdev_priv(netdev);
2271 	int ll_mtu = new_mtu + netdev->hard_header_len;
2272 	int old_hard_mtu = dev->hard_mtu;
2273 	int old_rx_urb_size = dev->rx_urb_size;
2274 	int ret;
2275 
2276 	/* no second zero-length packet read wanted after mtu-sized packets */
2277 	if ((ll_mtu % dev->maxpacket) == 0)
2278 		return -EDOM;
2279 
2280 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2281 
2282 	netdev->mtu = new_mtu;
2283 
2284 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2285 	if (dev->rx_urb_size == old_hard_mtu) {
2286 		dev->rx_urb_size = dev->hard_mtu;
2287 		if (dev->rx_urb_size > old_rx_urb_size) {
2288 			if (netif_running(dev->net)) {
2289 				unlink_urbs(dev, &dev->rxq);
2290 				tasklet_schedule(&dev->bh);
2291 			}
2292 		}
2293 	}
2294 
2295 	return 0;
2296 }
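
/* A sketch (hypothetical helper) of the zero-length-packet check above:
 * a link-layer MTU that is an exact multiple of the bulk max packet size
 * (e.g. 1536 with 512-byte high-speed packets) would require a trailing
 * ZLP on every mtu-sized read, so such MTUs are refused with -EDOM.
 */
#if 0
static bool example_mtu_acceptable(int ll_mtu, int maxpacket)
{
	return (ll_mtu % maxpacket) != 0;
}
#endif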
2297 
2298 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2299 {
2300 	struct lan78xx_net *dev = netdev_priv(netdev);
2301 	struct sockaddr *addr = p;
2302 	u32 addr_lo, addr_hi;
2303 	int ret;
2304 
2305 	if (netif_running(netdev))
2306 		return -EBUSY;
2307 
2308 	if (!is_valid_ether_addr(addr->sa_data))
2309 		return -EADDRNOTAVAIL;
2310 
2311 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2312 
2313 	addr_lo = netdev->dev_addr[0] |
2314 		  netdev->dev_addr[1] << 8 |
2315 		  netdev->dev_addr[2] << 16 |
2316 		  netdev->dev_addr[3] << 24;
2317 	addr_hi = netdev->dev_addr[4] |
2318 		  netdev->dev_addr[5] << 8;
2319 
2320 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2321 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2322 
2323 	/* Added to support MAC address changes */
2324 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2325 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2326 
2327 	return 0;
2328 }
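
/* Worked example (illustrative only) of the address packing above: for
 * 00:11:22:33:44:55, addr_lo becomes 0x33221100 and addr_hi 0x00005544,
 * i.e. the address is stored little-endian across RX_ADDRL/RX_ADDRH.
 * example_pack_mac() is a hypothetical helper.
 */
#if 0
static void example_pack_mac(const u8 a[ETH_ALEN], u32 *lo, u32 *hi)
{
	*lo = a[0] | a[1] << 8 | a[2] << 16 | a[3] << 24;
	*hi = a[4] | a[5] << 8;
}
#endif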
2329 
2330 /* Enable or disable Rx checksum offload engine */
2331 static int lan78xx_set_features(struct net_device *netdev,
2332 				netdev_features_t features)
2333 {
2334 	struct lan78xx_net *dev = netdev_priv(netdev);
2335 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2336 	unsigned long flags;
2337 	int ret;
2338 
2339 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2340 
2341 	if (features & NETIF_F_RXCSUM) {
2342 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2343 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2344 	} else {
2345 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2346 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2347 	}
2348 
2349 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2350 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2351 	else
2352 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2353 
2354 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2355 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2356 	else
2357 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2358 
2359 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2360 
2361 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2362 
2363 	return 0;
2364 }
2365 
2366 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2367 {
2368 	struct lan78xx_priv *pdata =
2369 			container_of(param, struct lan78xx_priv, set_vlan);
2370 	struct lan78xx_net *dev = pdata->dev;
2371 
2372 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2373 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2374 }
2375 
2376 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2377 				   __be16 proto, u16 vid)
2378 {
2379 	struct lan78xx_net *dev = netdev_priv(netdev);
2380 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2381 	u16 vid_bit_index;
2382 	u16 vid_dword_index;
2383 
2384 	vid_dword_index = (vid >> 5) & 0x7F;
2385 	vid_bit_index = vid & 0x1F;
2386 
2387 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2388 
2389 	/* defer register writes to a sleepable context */
2390 	schedule_work(&pdata->set_vlan);
2391 
2392 	return 0;
2393 }
2394 
2395 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2396 				    __be16 proto, u16 vid)
2397 {
2398 	struct lan78xx_net *dev = netdev_priv(netdev);
2399 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2400 	u16 vid_bit_index;
2401 	u16 vid_dword_index;
2402 
2403 	vid_dword_index = (vid >> 5) & 0x7F;
2404 	vid_bit_index = vid & 0x1F;
2405 
2406 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2407 
2408 	/* defer register writes to a sleepable context */
2409 	schedule_work(&pdata->set_vlan);
2410 
2411 	return 0;
2412 }
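
/* Worked example (illustrative only) of the bitmap indexing used by the
 * two handlers above: VID 1000 maps to vlan_table[31], bit 8, since
 * 1000 >> 5 == 31 and 1000 & 0x1F == 8.  example_vid_index() is a
 * hypothetical helper.
 */
#if 0
static void example_vid_index(u16 vid, u16 *dword, u16 *bit)
{
	*dword = (vid >> 5) & 0x7F;
	*bit = vid & 0x1F;
}
#endif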
2413 
2414 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2415 {
2416 	int ret;
2417 	u32 buf;
2418 	u32 regs[6] = { 0 };
2419 
2420 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2421 	if (buf & USB_CFG1_LTM_ENABLE_) {
2422 		u8 temp[2];
2423 		/* Get values from EEPROM first */
2424 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2425 			if (temp[0] == 24) {
2426 				ret = lan78xx_read_raw_eeprom(dev,
2427 							      temp[1] * 2,
2428 							      24,
2429 							      (u8 *)regs);
2430 				if (ret < 0)
2431 					return;
2432 			}
2433 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2434 			if (temp[0] == 24) {
2435 				ret = lan78xx_read_raw_otp(dev,
2436 							   temp[1] * 2,
2437 							   24,
2438 							   (u8 *)regs);
2439 				if (ret < 0)
2440 					return;
2441 			}
2442 		}
2443 	}
2444 
2445 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2446 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2447 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2448 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2449 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2450 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2451 }
2452 
2453 static int lan78xx_reset(struct lan78xx_net *dev)
2454 {
2455 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2456 	u32 buf;
2457 	int ret = 0;
2458 	unsigned long timeout;
2459 	u8 sig;
2460 
2461 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2462 	buf |= HW_CFG_LRST_;
2463 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2464 
2465 	timeout = jiffies + HZ;
2466 	do {
2467 		mdelay(1);
2468 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2469 		if (time_after(jiffies, timeout)) {
2470 			netdev_warn(dev->net,
2471 				    "timeout on completion of LiteReset");
2472 			return -EIO;
2473 		}
2474 	} while (buf & HW_CFG_LRST_);
2475 
2476 	lan78xx_init_mac_address(dev);
2477 
2478 	/* save DEVID for later usage */
2479 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2480 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2481 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2482 
2483 	/* Respond to the IN token with a NAK */
2484 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2485 	buf |= USB_CFG_BIR_;
2486 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2487 
2488 	/* Init LTM */
2489 	lan78xx_init_ltm(dev);
2490 
2491 	if (dev->udev->speed == USB_SPEED_SUPER) {
2492 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2493 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2494 		dev->rx_qlen = 4;
2495 		dev->tx_qlen = 4;
2496 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2497 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2498 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2499 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2500 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2501 	} else {
2502 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2503 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2504 		dev->rx_qlen = 4;
2505 		dev->tx_qlen = 4;
2506 	}
2507 
2508 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2509 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2510 
2511 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2512 	buf |= HW_CFG_MEF_;
2513 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2514 
2515 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2516 	buf |= USB_CFG_BCE_;
2517 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2518 
2519 	/* set FIFO sizes */
2520 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2521 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2522 
2523 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2524 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2525 
2526 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2527 	ret = lan78xx_write_reg(dev, FLOW, 0);
2528 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2529 
2530 	/* Don't need rfe_ctl_lock during initialisation */
2531 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2532 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2533 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2534 
2535 	/* Enable or disable checksum offload engines */
2536 	lan78xx_set_features(dev->net, dev->net->features);
2537 
2538 	lan78xx_set_multicast(dev->net);
2539 
2540 	/* reset PHY */
2541 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2542 	buf |= PMT_CTL_PHY_RST_;
2543 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2544 
2545 	timeout = jiffies + HZ;
2546 	do {
2547 		mdelay(1);
2548 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2549 		if (time_after(jiffies, timeout)) {
2550 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2551 			return -EIO;
2552 		}
2553 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2554 
2555 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2556 	/* LAN7801 only has RGMII mode */
2557 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2558 		buf &= ~MAC_CR_GMII_EN_;
2559 
2560 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2561 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2562 		if (!ret && sig != EEPROM_INDICATOR) {
2563 			/* Implies there is no external EEPROM. Set MAC speed */
2564 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2565 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2566 		}
2567 	}
2568 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2569 
2570 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2571 	buf |= MAC_TX_TXEN_;
2572 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2573 
2574 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2575 	buf |= FCT_TX_CTL_EN_;
2576 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2577 
2578 	ret = lan78xx_set_rx_max_frame_length(dev,
2579 					      dev->net->mtu + VLAN_ETH_HLEN);
2580 
2581 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2582 	buf |= MAC_RX_RXEN_;
2583 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2584 
2585 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2586 	buf |= FCT_RX_CTL_EN_;
2587 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2588 
2589 	return 0;
2590 }
2591 
2592 static void lan78xx_init_stats(struct lan78xx_net *dev)
2593 {
2594 	u32 *p;
2595 	int i;
2596 
2597 	/* initialize rollover limits for the stats update:
2598 	 * most counters are 20 bits wide, the ones below are 32 bits
2599 	 */
2600 	p = (u32 *)&dev->stats.rollover_max;
2601 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2602 		p[i] = 0xFFFFF;
2603 
2604 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2605 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2606 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2607 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2608 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2609 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2610 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2611 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2612 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2613 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2614 
2615 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2616 }
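
/* A minimal sketch (assumption: not how the driver itself accumulates) of
 * wrap-safe subtraction against the 20-bit rollover limit initialised
 * above; the byte/LPI counters would use 0xFFFFFFFF instead.
 */
#if 0
static u32 example_counter_delta(u32 prev, u32 now)
{
	return (now - prev) & 0xFFFFF;	/* modulo the 20-bit counter width */
}
#endif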
2617 
2618 static int lan78xx_open(struct net_device *net)
2619 {
2620 	struct lan78xx_net *dev = netdev_priv(net);
2621 	int ret;
2622 
2623 	ret = usb_autopm_get_interface(dev->intf);
2624 	if (ret < 0)
2625 		goto out;
2626 
2627 	phy_start(net->phydev);
2628 
2629 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2630 
2631 	/* for Link Check */
2632 	if (dev->urb_intr) {
2633 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2634 		if (ret < 0) {
2635 			netif_err(dev, ifup, dev->net,
2636 				  "intr submit %d\n", ret);
2637 			goto done;
2638 		}
2639 	}
2640 
2641 	lan78xx_init_stats(dev);
2642 
2643 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2644 
2645 	netif_start_queue(net);
2646 
2647 	dev->link_on = false;
2648 
2649 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2650 done:
2651 	usb_autopm_put_interface(dev->intf);
2652 
2653 out:
2654 	return ret;
2655 }
2656 
2657 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2658 {
2659 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2660 	DECLARE_WAITQUEUE(wait, current);
2661 	int temp;
2662 
2663 	/* ensure there are no more active urbs */
2664 	add_wait_queue(&unlink_wakeup, &wait);
2665 	set_current_state(TASK_UNINTERRUPTIBLE);
2666 	dev->wait = &unlink_wakeup;
2667 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2668 
2669 	/* maybe wait for deletions to finish. */
2670 	while (!skb_queue_empty(&dev->rxq) &&
2671 	       !skb_queue_empty(&dev->txq) &&
2672 	       !skb_queue_empty(&dev->done)) {
2673 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2674 		set_current_state(TASK_UNINTERRUPTIBLE);
2675 		netif_dbg(dev, ifdown, dev->net,
2676 			  "waited for %d urb completions\n", temp);
2677 	}
2678 	set_current_state(TASK_RUNNING);
2679 	dev->wait = NULL;
2680 	remove_wait_queue(&unlink_wakeup, &wait);
2681 }
2682 
2683 static int lan78xx_stop(struct net_device *net)
2684 {
2685 	struct lan78xx_net *dev = netdev_priv(net);
2686 
2687 	if (timer_pending(&dev->stat_monitor))
2688 		del_timer_sync(&dev->stat_monitor);
2689 
2690 	if (net->phydev)
2691 		phy_stop(net->phydev);
2692 
2693 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2694 	netif_stop_queue(net);
2695 
2696 	netif_info(dev, ifdown, dev->net,
2697 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2698 		   net->stats.rx_packets, net->stats.tx_packets,
2699 		   net->stats.rx_errors, net->stats.tx_errors);
2700 
2701 	lan78xx_terminate_urbs(dev);
2702 
2703 	usb_kill_urb(dev->urb_intr);
2704 
2705 	skb_queue_purge(&dev->rxq_pause);
2706 
2707 	/* deferred work (task, timer, softirq) must also stop.
2708 	 * can't flush_scheduled_work() until we drop rtnl (later),
2709 	 * else workers could deadlock; so make workers a NOP.
2710 	 */
2711 	dev->flags = 0;
2712 	cancel_delayed_work_sync(&dev->wq);
2713 	tasklet_kill(&dev->bh);
2714 
2715 	usb_autopm_put_interface(dev->intf);
2716 
2717 	return 0;
2718 }
2719 
2720 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2721 				       struct sk_buff *skb, gfp_t flags)
2722 {
2723 	u32 tx_cmd_a, tx_cmd_b;
2724 	void *ptr;
2725 
2726 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2727 		dev_kfree_skb_any(skb);
2728 		return NULL;
2729 	}
2730 
2731 	if (skb_linearize(skb)) {
2732 		dev_kfree_skb_any(skb);
2733 		return NULL;
2734 	}
2735 
2736 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2737 
2738 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2739 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2740 
2741 	tx_cmd_b = 0;
2742 	if (skb_is_gso(skb)) {
2743 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2744 
2745 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2746 
2747 		tx_cmd_a |= TX_CMD_A_LSO_;
2748 	}
2749 
2750 	if (skb_vlan_tag_present(skb)) {
2751 		tx_cmd_a |= TX_CMD_A_IVTG_;
2752 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2753 	}
2754 
2755 	ptr = skb_push(skb, 8);
2756 	put_unaligned_le32(tx_cmd_a, ptr);
2757 	put_unaligned_le32(tx_cmd_b, ptr + 4);
2758 
2759 	return skb;
2760 }
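
/* Worked example (illustrative only) of the 8-byte command header built
 * above: a 60-byte frame with no offloads yields tx_cmd_a = 60 |
 * TX_CMD_A_FCS_ and tx_cmd_b = 0, written little-endian in front of the
 * frame data.  example_tx_cmds() is a hypothetical name.
 */
#if 0
static void example_tx_cmds(u32 *cmd_a, u32 *cmd_b)
{
	*cmd_a = (60 & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
	*cmd_b = 0;	/* no TSO MSS, no VLAN tag insertion */
}
#endif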
2761 
2762 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2763 			       struct sk_buff_head *list, enum skb_state state)
2764 {
2765 	unsigned long flags;
2766 	enum skb_state old_state;
2767 	struct skb_data *entry = (struct skb_data *)skb->cb;
2768 
2769 	spin_lock_irqsave(&list->lock, flags);
2770 	old_state = entry->state;
2771 	entry->state = state;
2772 
2773 	__skb_unlink(skb, list);
2774 	spin_unlock(&list->lock);
2775 	spin_lock(&dev->done.lock);
2776 
2777 	__skb_queue_tail(&dev->done, skb);
2778 	if (skb_queue_len(&dev->done) == 1)
2779 		tasklet_schedule(&dev->bh);
2780 	spin_unlock_irqrestore(&dev->done.lock, flags);
2781 
2782 	return old_state;
2783 }
2784 
2785 static void tx_complete(struct urb *urb)
2786 {
2787 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2788 	struct skb_data *entry = (struct skb_data *)skb->cb;
2789 	struct lan78xx_net *dev = entry->dev;
2790 
2791 	if (urb->status == 0) {
2792 		dev->net->stats.tx_packets += entry->num_of_packet;
2793 		dev->net->stats.tx_bytes += entry->length;
2794 	} else {
2795 		dev->net->stats.tx_errors++;
2796 
2797 		switch (urb->status) {
2798 		case -EPIPE:
2799 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2800 			break;
2801 
2802 		/* software-driven interface shutdown */
2803 		case -ECONNRESET:
2804 		case -ESHUTDOWN:
2805 			break;
2806 
2807 		case -EPROTO:
2808 		case -ETIME:
2809 		case -EILSEQ:
2810 			netif_stop_queue(dev->net);
2811 			break;
2812 		default:
2813 			netif_dbg(dev, tx_err, dev->net,
2814 				  "tx err %d\n", entry->urb->status);
2815 			break;
2816 		}
2817 	}
2818 
2819 	usb_autopm_put_interface_async(dev->intf);
2820 
2821 	defer_bh(dev, skb, &dev->txq, tx_done);
2822 }
2823 
2824 static void lan78xx_queue_skb(struct sk_buff_head *list,
2825 			      struct sk_buff *newsk, enum skb_state state)
2826 {
2827 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2828 
2829 	__skb_queue_tail(list, newsk);
2830 	entry->state = state;
2831 }
2832 
2833 static netdev_tx_t
2834 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2835 {
2836 	struct lan78xx_net *dev = netdev_priv(net);
2837 	struct sk_buff *skb2 = NULL;
2838 
2839 	if (skb) {
2840 		skb_tx_timestamp(skb);
2841 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2842 	}
2843 
2844 	if (skb2) {
2845 		skb_queue_tail(&dev->txq_pend, skb2);
2846 
2847 		/* throttle the TX path at less than SuperSpeed USB */
2848 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2849 		    (skb_queue_len(&dev->txq_pend) > 10))
2850 			netif_stop_queue(net);
2851 	} else {
2852 		netif_dbg(dev, tx_err, dev->net,
2853 			  "lan78xx_tx_prep return NULL\n");
2854 		dev->net->stats.tx_errors++;
2855 		dev->net->stats.tx_dropped++;
2856 	}
2857 
2858 	tasklet_schedule(&dev->bh);
2859 
2860 	return NETDEV_TX_OK;
2861 }
2862 
2863 static int
2864 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2865 {
2866 	int tmp;
2867 	struct usb_host_interface *alt = NULL;
2868 	struct usb_host_endpoint *in = NULL, *out = NULL;
2869 	struct usb_host_endpoint *status = NULL;
2870 
2871 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2872 		unsigned ep;
2873 
2874 		in = NULL;
2875 		out = NULL;
2876 		status = NULL;
2877 		alt = intf->altsetting + tmp;
2878 
2879 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2880 			struct usb_host_endpoint *e;
2881 			int intr = 0;
2882 
2883 			e = alt->endpoint + ep;
2884 			switch (e->desc.bmAttributes) {
2885 			case USB_ENDPOINT_XFER_INT:
2886 				if (!usb_endpoint_dir_in(&e->desc))
2887 					continue;
2888 				intr = 1;
2889 				/* FALLTHROUGH */
2890 			case USB_ENDPOINT_XFER_BULK:
2891 				break;
2892 			default:
2893 				continue;
2894 			}
2895 			if (usb_endpoint_dir_in(&e->desc)) {
2896 				if (!intr && !in)
2897 					in = e;
2898 				else if (intr && !status)
2899 					status = e;
2900 			} else {
2901 				if (!out)
2902 					out = e;
2903 			}
2904 		}
2905 		if (in && out)
2906 			break;
2907 	}
2908 	if (!alt || !in || !out)
2909 		return -EINVAL;
2910 
2911 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2912 				       in->desc.bEndpointAddress &
2913 				       USB_ENDPOINT_NUMBER_MASK);
2914 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2915 					out->desc.bEndpointAddress &
2916 					USB_ENDPOINT_NUMBER_MASK);
2917 	dev->ep_intr = status;
2918 
2919 	return 0;
2920 }
2921 
2922 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2923 {
2924 	struct lan78xx_priv *pdata = NULL;
2925 	int ret;
2926 	int i;
2927 
2928 	ret = lan78xx_get_endpoints(dev, intf);
2929 	if (ret) {
2930 		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2931 			    ret);
2932 		return ret;
2933 	}
2934 
2935 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2936 
2937 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2938 	if (!pdata) {
2939 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2940 		return -ENOMEM;
2941 	}
2942 
2943 	pdata->dev = dev;
2944 
2945 	spin_lock_init(&pdata->rfe_ctl_lock);
2946 	mutex_init(&pdata->dataport_mutex);
2947 
2948 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2949 
2950 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2951 		pdata->vlan_table[i] = 0;
2952 
2953 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2954 
2955 	dev->net->features = 0;
2956 
2957 	if (DEFAULT_TX_CSUM_ENABLE)
2958 		dev->net->features |= NETIF_F_HW_CSUM;
2959 
2960 	if (DEFAULT_RX_CSUM_ENABLE)
2961 		dev->net->features |= NETIF_F_RXCSUM;
2962 
2963 	if (DEFAULT_TSO_CSUM_ENABLE)
2964 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2965 
2966 	if (DEFAULT_VLAN_RX_OFFLOAD)
2967 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2968 
2969 	if (DEFAULT_VLAN_FILTER_ENABLE)
2970 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2971 
2972 	dev->net->hw_features = dev->net->features;
2973 
2974 	ret = lan78xx_setup_irq_domain(dev);
2975 	if (ret < 0) {
2976 		netdev_warn(dev->net,
2977 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2978 		goto out1;
2979 	}
2980 
2981 	dev->net->hard_header_len += TX_OVERHEAD;
2982 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2983 
2984 	/* Init all registers */
2985 	ret = lan78xx_reset(dev);
2986 	if (ret) {
2987 		netdev_warn(dev->net, "Registers INIT FAILED....");
2988 		goto out2;
2989 	}
2990 
2991 	ret = lan78xx_mdio_init(dev);
2992 	if (ret) {
2993 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
2994 		goto out2;
2995 	}
2996 
2997 	dev->net->flags |= IFF_MULTICAST;
2998 
2999 	pdata->wol = WAKE_MAGIC;
3000 
3001 	return ret;
3002 
3003 out2:
3004 	lan78xx_remove_irq_domain(dev);
3005 
3006 out1:
3007 	netdev_warn(dev->net, "Bind routine FAILED");
3008 	cancel_work_sync(&pdata->set_multicast);
3009 	cancel_work_sync(&pdata->set_vlan);
3010 	kfree(pdata);
3011 	return ret;
3012 }
3013 
3014 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3015 {
3016 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3017 
3018 	lan78xx_remove_irq_domain(dev);
3019 
3020 	lan78xx_remove_mdio(dev);
3021 
3022 	if (pdata) {
3023 		cancel_work_sync(&pdata->set_multicast);
3024 		cancel_work_sync(&pdata->set_vlan);
3025 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3026 		kfree(pdata);
3027 		pdata = NULL;
3028 		dev->data[0] = 0;
3029 	}
3030 }
3031 
3032 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3033 				    struct sk_buff *skb,
3034 				    u32 rx_cmd_a, u32 rx_cmd_b)
3035 {
3036 	/* HW Checksum offload appears to be flawed if used when not stripping
3037 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3038 	 */
3039 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3040 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3041 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3042 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3043 		skb->ip_summed = CHECKSUM_NONE;
3044 	} else {
3045 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3046 		skb->ip_summed = CHECKSUM_COMPLETE;
3047 	}
3048 }
3049 
3050 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3051 				    struct sk_buff *skb,
3052 				    u32 rx_cmd_a, u32 rx_cmd_b)
3053 {
3054 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3055 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3056 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3057 				       (rx_cmd_b & 0xffff));
3058 }
3059 
3060 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3061 {
3062 	int status;
3063 
3064 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3065 		skb_queue_tail(&dev->rxq_pause, skb);
3066 		return;
3067 	}
3068 
3069 	dev->net->stats.rx_packets++;
3070 	dev->net->stats.rx_bytes += skb->len;
3071 
3072 	skb->protocol = eth_type_trans(skb, dev->net);
3073 
3074 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3075 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3076 	memset(skb->cb, 0, sizeof(struct skb_data));
3077 
3078 	if (skb_defer_rx_timestamp(skb))
3079 		return;
3080 
3081 	status = netif_rx(skb);
3082 	if (status != NET_RX_SUCCESS)
3083 		netif_dbg(dev, rx_err, dev->net,
3084 			  "netif_rx status %d\n", status);
3085 }
3086 
3087 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3088 {
3089 	if (skb->len < dev->net->hard_header_len)
3090 		return 0;
3091 
3092 	while (skb->len > 0) {
3093 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3094 		u16 rx_cmd_c;
3095 		struct sk_buff *skb2;
3096 		unsigned char *packet;
3097 
3098 		rx_cmd_a = get_unaligned_le32(skb->data);
3099 		skb_pull(skb, sizeof(rx_cmd_a));
3100 
3101 		rx_cmd_b = get_unaligned_le32(skb->data);
3102 		skb_pull(skb, sizeof(rx_cmd_b));
3103 
3104 		rx_cmd_c = get_unaligned_le16(skb->data);
3105 		skb_pull(skb, sizeof(rx_cmd_c));
3106 
3107 		packet = skb->data;
3108 
3109 		/* get the packet length */
3110 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3111 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3112 
3113 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3114 			netif_dbg(dev, rx_err, dev->net,
3115 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3116 		} else {
3117 			/* last frame in this batch */
3118 			if (skb->len == size) {
3119 				lan78xx_rx_csum_offload(dev, skb,
3120 							rx_cmd_a, rx_cmd_b);
3121 				lan78xx_rx_vlan_offload(dev, skb,
3122 							rx_cmd_a, rx_cmd_b);
3123 
3124 				skb_trim(skb, skb->len - 4); /* remove fcs */
3125 				skb->truesize = size + sizeof(struct sk_buff);
3126 
3127 				return 1;
3128 			}
3129 
3130 			skb2 = skb_clone(skb, GFP_ATOMIC);
3131 			if (unlikely(!skb2)) {
3132 				netdev_warn(dev->net, "Error allocating skb");
3133 				return 0;
3134 			}
3135 
3136 			skb2->len = size;
3137 			skb2->data = packet;
3138 			skb_set_tail_pointer(skb2, size);
3139 
3140 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3141 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3142 
3143 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3144 			skb2->truesize = size + sizeof(struct sk_buff);
3145 
3146 			lan78xx_skb_return(dev, skb2);
3147 		}
3148 
3149 		skb_pull(skb, size);
3150 
3151 		/* padding bytes before the next frame starts */
3152 		if (skb->len)
3153 			skb_pull(skb, align_count);
3154 	}
3155 
3156 	return 1;
3157 }
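
/* Worked example (illustrative only) of the batching layout parsed above:
 * each frame in a URB is rx_cmd_a (4 bytes) + rx_cmd_b (4) + rx_cmd_c (2)
 * + packet, padded so the next command word is 4-byte aligned.  For a
 * 60-byte packet the pad is (4 - ((60 + RXW_PADDING) % 4)) % 4 == 2.
 */
#if 0
static u32 example_rx_align(u32 size)
{
	return (4 - ((size + RXW_PADDING) % 4)) % 4;
}
#endif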
3158 
3159 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3160 {
3161 	if (!lan78xx_rx(dev, skb)) {
3162 		dev->net->stats.rx_errors++;
3163 		goto done;
3164 	}
3165 
3166 	if (skb->len) {
3167 		lan78xx_skb_return(dev, skb);
3168 		return;
3169 	}
3170 
3171 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3172 	dev->net->stats.rx_errors++;
3173 done:
3174 	skb_queue_tail(&dev->done, skb);
3175 }
3176 
3177 static void rx_complete(struct urb *urb);
3178 
3179 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3180 {
3181 	struct sk_buff *skb;
3182 	struct skb_data *entry;
3183 	unsigned long lockflags;
3184 	size_t size = dev->rx_urb_size;
3185 	int ret = 0;
3186 
3187 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3188 	if (!skb) {
3189 		usb_free_urb(urb);
3190 		return -ENOMEM;
3191 	}
3192 
3193 	entry = (struct skb_data *)skb->cb;
3194 	entry->urb = urb;
3195 	entry->dev = dev;
3196 	entry->length = 0;
3197 
3198 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3199 			  skb->data, size, rx_complete, skb);
3200 
3201 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3202 
3203 	if (netif_device_present(dev->net) &&
3204 	    netif_running(dev->net) &&
3205 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3206 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3207 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3208 		switch (ret) {
3209 		case 0:
3210 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3211 			break;
3212 		case -EPIPE:
3213 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3214 			break;
3215 		case -ENODEV:
3216 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3217 			netif_device_detach(dev->net);
3218 			break;
3219 		case -EHOSTUNREACH:
3220 			ret = -ENOLINK;
3221 			break;
3222 		default:
3223 			netif_dbg(dev, rx_err, dev->net,
3224 				  "rx submit, %d\n", ret);
3225 			tasklet_schedule(&dev->bh);
3226 		}
3227 	} else {
3228 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3229 		ret = -ENOLINK;
3230 	}
3231 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3232 	if (ret) {
3233 		dev_kfree_skb_any(skb);
3234 		usb_free_urb(urb);
3235 	}
3236 	return ret;
3237 }
3238 
3239 static void rx_complete(struct urb *urb)
3240 {
3241 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3242 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3243 	struct lan78xx_net *dev = entry->dev;
3244 	int urb_status = urb->status;
3245 	enum skb_state state;
3246 
3247 	skb_put(skb, urb->actual_length);
3248 	state = rx_done;
3249 	entry->urb = NULL;
3250 
3251 	switch (urb_status) {
3252 	case 0:
3253 		if (skb->len < dev->net->hard_header_len) {
3254 			state = rx_cleanup;
3255 			dev->net->stats.rx_errors++;
3256 			dev->net->stats.rx_length_errors++;
3257 			netif_dbg(dev, rx_err, dev->net,
3258 				  "rx length %d\n", skb->len);
3259 		}
3260 		usb_mark_last_busy(dev->udev);
3261 		break;
3262 	case -EPIPE:
3263 		dev->net->stats.rx_errors++;
3264 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3265 		/* FALLTHROUGH */
3266 	case -ECONNRESET:				/* async unlink */
3267 	case -ESHUTDOWN:				/* hardware gone */
3268 		netif_dbg(dev, ifdown, dev->net,
3269 			  "rx shutdown, code %d\n", urb_status);
3270 		state = rx_cleanup;
3271 		entry->urb = urb;
3272 		urb = NULL;
3273 		break;
3274 	case -EPROTO:
3275 	case -ETIME:
3276 	case -EILSEQ:
3277 		dev->net->stats.rx_errors++;
3278 		state = rx_cleanup;
3279 		entry->urb = urb;
3280 		urb = NULL;
3281 		break;
3282 
3283 	/* data overrun ... flush fifo? */
3284 	case -EOVERFLOW:
3285 		dev->net->stats.rx_over_errors++;
3286 		/* FALLTHROUGH */
3287 
3288 	default:
3289 		state = rx_cleanup;
3290 		dev->net->stats.rx_errors++;
3291 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3292 		break;
3293 	}
3294 
3295 	state = defer_bh(dev, skb, &dev->rxq, state);
3296 
3297 	if (urb) {
3298 		if (netif_running(dev->net) &&
3299 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3300 		    state != unlink_start) {
3301 			rx_submit(dev, urb, GFP_ATOMIC);
3302 			return;
3303 		}
3304 		usb_free_urb(urb);
3305 	}
3306 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3307 }
3308 
3309 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3310 {
3311 	int length;
3312 	struct urb *urb = NULL;
3313 	struct skb_data *entry;
3314 	unsigned long flags;
3315 	struct sk_buff_head *tqp = &dev->txq_pend;
3316 	struct sk_buff *skb, *skb2;
3317 	int ret;
3318 	int count, pos;
3319 	int skb_totallen, pkt_cnt;
3320 
3321 	skb_totallen = 0;
3322 	pkt_cnt = 0;
3323 	count = 0;
3324 	length = 0;
3325 	spin_lock_irqsave(&tqp->lock, flags);
3326 	skb_queue_walk(tqp, skb) {
3327 		if (skb_is_gso(skb)) {
3328 			if (!skb_queue_is_first(tqp, skb)) {
3329 				/* handle previous packets first */
3330 				break;
3331 			}
3332 			count = 1;
3333 			length = skb->len - TX_OVERHEAD;
3334 			__skb_unlink(skb, tqp);
3335 			spin_unlock_irqrestore(&tqp->lock, flags);
3336 			goto gso_skb;
3337 		}
3338 
3339 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3340 			break;
3341 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3342 		pkt_cnt++;
3343 	}
3344 	spin_unlock_irqrestore(&tqp->lock, flags);
3345 
3346 	/* copy to a single skb */
3347 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3348 	if (!skb)
3349 		goto drop;
3350 
3351 	skb_put(skb, skb_totallen);
3352 
3353 	for (count = pos = 0; count < pkt_cnt; count++) {
3354 		skb2 = skb_dequeue(tqp);
3355 		if (skb2) {
3356 			length += (skb2->len - TX_OVERHEAD);
3357 			memcpy(skb->data + pos, skb2->data, skb2->len);
3358 			pos += roundup(skb2->len, sizeof(u32));
3359 			dev_kfree_skb(skb2);
3360 		}
3361 	}
3362 
3363 gso_skb:
3364 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3365 	if (!urb)
3366 		goto drop;
3367 
3368 	entry = (struct skb_data *)skb->cb;
3369 	entry->urb = urb;
3370 	entry->dev = dev;
3371 	entry->length = length;
3372 	entry->num_of_packet = count;
3373 
3374 	spin_lock_irqsave(&dev->txq.lock, flags);
3375 	ret = usb_autopm_get_interface_async(dev->intf);
3376 	if (ret < 0) {
3377 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3378 		goto drop;
3379 	}
3380 
3381 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3382 			  skb->data, skb->len, tx_complete, skb);
3383 
3384 	if (length % dev->maxpacket == 0) {
3385 		/* request a trailing zero-length packet (ZLP) */
3386 		urb->transfer_flags |= URB_ZERO_PACKET;
3387 	}
3388 
3389 #ifdef CONFIG_PM
3390 	/* if this triggers, the device is still asleep */
3391 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3392 		/* transmission will be done in resume */
3393 		usb_anchor_urb(urb, &dev->deferred);
3394 		/* no use to process more packets */
3395 		netif_stop_queue(dev->net);
3396 		usb_put_urb(urb);
3397 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3398 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3399 		return;
3400 	}
3401 #endif
3402 
3403 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3404 	switch (ret) {
3405 	case 0:
3406 		netif_trans_update(dev->net);
3407 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3408 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3409 			netif_stop_queue(dev->net);
3410 		break;
3411 	case -EPIPE:
3412 		netif_stop_queue(dev->net);
3413 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3414 		usb_autopm_put_interface_async(dev->intf);
3415 		break;
3416 	default:
3417 		usb_autopm_put_interface_async(dev->intf);
3418 		netif_dbg(dev, tx_err, dev->net,
3419 			  "tx: submit urb err %d\n", ret);
3420 		break;
3421 	}
3422 
3423 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3424 
3425 	if (ret) {
3426 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3427 drop:
3428 		dev->net->stats.tx_dropped++;
3429 		if (skb)
3430 			dev_kfree_skb_any(skb);
3431 		usb_free_urb(urb);
3432 	} else
3433 		netif_dbg(dev, tx_queued, dev->net,
3434 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3435 }
3436 
3437 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3438 {
3439 	struct urb *urb;
3440 	int i;
3441 
3442 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3443 		for (i = 0; i < 10; i++) {
3444 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3445 				break;
3446 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3447 			if (urb)
3448 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3449 					return;
3450 		}
3451 
3452 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3453 			tasklet_schedule(&dev->bh);
3454 	}
3455 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3456 		netif_wake_queue(dev->net);
3457 }
3458 
3459 static void lan78xx_bh(unsigned long param)
3460 {
3461 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3462 	struct sk_buff *skb;
3463 	struct skb_data *entry;
3464 
3465 	while ((skb = skb_dequeue(&dev->done))) {
3466 		entry = (struct skb_data *)(skb->cb);
3467 		switch (entry->state) {
3468 		case rx_done:
3469 			entry->state = rx_cleanup;
3470 			rx_process(dev, skb);
3471 			continue;
3472 		case tx_done:
3473 			usb_free_urb(entry->urb);
3474 			dev_kfree_skb(skb);
3475 			continue;
3476 		case rx_cleanup:
3477 			usb_free_urb(entry->urb);
3478 			dev_kfree_skb(skb);
3479 			continue;
3480 		default:
3481 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3482 			return;
3483 		}
3484 	}
3485 
3486 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3487 		/* reset update timer delta */
3488 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3489 			dev->delta = 1;
3490 			mod_timer(&dev->stat_monitor,
3491 				  jiffies + STAT_UPDATE_TIMER);
3492 		}
3493 
3494 		if (!skb_queue_empty(&dev->txq_pend))
3495 			lan78xx_tx_bh(dev);
3496 
3497 		if (!timer_pending(&dev->delay) &&
3498 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3499 			lan78xx_rx_bh(dev);
3500 	}
3501 }
3502 
3503 static void lan78xx_delayedwork(struct work_struct *work)
3504 {
3505 	int status;
3506 	struct lan78xx_net *dev;
3507 
3508 	dev = container_of(work, struct lan78xx_net, wq.work);
3509 
3510 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3511 		unlink_urbs(dev, &dev->txq);
3512 		status = usb_autopm_get_interface(dev->intf);
3513 		if (status < 0)
3514 			goto fail_pipe;
3515 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3516 		usb_autopm_put_interface(dev->intf);
3517 		if (status < 0 &&
3518 		    status != -EPIPE &&
3519 		    status != -ESHUTDOWN) {
3520 			if (netif_msg_tx_err(dev))
3521 fail_pipe:
3522 				netdev_err(dev->net,
3523 					   "can't clear tx halt, status %d\n",
3524 					   status);
3525 		} else {
3526 			clear_bit(EVENT_TX_HALT, &dev->flags);
3527 			if (status != -ESHUTDOWN)
3528 				netif_wake_queue(dev->net);
3529 		}
3530 	}
3531 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3532 		unlink_urbs(dev, &dev->rxq);
3533 		status = usb_autopm_get_interface(dev->intf);
3534 		if (status < 0)
3535 			goto fail_halt;
3536 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3537 		usb_autopm_put_interface(dev->intf);
3538 		if (status < 0 &&
3539 		    status != -EPIPE &&
3540 		    status != -ESHUTDOWN) {
3541 			if (netif_msg_rx_err(dev))
3542 fail_halt:
3543 				netdev_err(dev->net,
3544 					   "can't clear rx halt, status %d\n",
3545 					   status);
3546 		} else {
3547 			clear_bit(EVENT_RX_HALT, &dev->flags);
3548 			tasklet_schedule(&dev->bh);
3549 		}
3550 	}
3551 
3552 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3553 		int ret = 0;
3554 
3555 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3556 		status = usb_autopm_get_interface(dev->intf);
3557 		if (status < 0)
3558 			goto skip_reset;
3559 		if (lan78xx_link_reset(dev) < 0) {
3560 			usb_autopm_put_interface(dev->intf);
3561 skip_reset:
3562 			netdev_info(dev->net, "link reset failed (%d)\n",
3563 				    ret);
3564 		} else {
3565 			usb_autopm_put_interface(dev->intf);
3566 		}
3567 	}
3568 
3569 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3570 		lan78xx_update_stats(dev);
3571 
3572 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3573 
3574 		mod_timer(&dev->stat_monitor,
3575 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3576 
3577 		dev->delta = min((dev->delta * 2), 50);
3578 	}
3579 }
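
/* A sketch (hypothetical helper) of the stat-timer backoff applied above:
 * delta doubles 1, 2, 4, ... capped at 50, so polls stretch from 1 s to
 * 50 s while idle; lan78xx_bh() resets delta to 1 once traffic resumes.
 */
#if 0
static int example_next_delta(int delta)
{
	return min(delta * 2, 50);
}
#endif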
3580 
3581 static void intr_complete(struct urb *urb)
3582 {
3583 	struct lan78xx_net *dev = urb->context;
3584 	int status = urb->status;
3585 
3586 	switch (status) {
3587 	/* success */
3588 	case 0:
3589 		lan78xx_status(dev, urb);
3590 		break;
3591 
3592 	/* software-driven interface shutdown */
3593 	case -ENOENT:			/* urb killed */
3594 	case -ESHUTDOWN:		/* hardware gone */
3595 		netif_dbg(dev, ifdown, dev->net,
3596 			  "intr shutdown, code %d\n", status);
3597 		return;
3598 
3599 	/* NOTE:  not throttling like RX/TX, since this endpoint
3600 	 * already polls infrequently
3601 	 */
3602 	default:
3603 		netdev_dbg(dev->net, "intr status %d\n", status);
3604 		break;
3605 	}
3606 
3607 	if (!netif_running(dev->net))
3608 		return;
3609 
3610 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3611 	status = usb_submit_urb(urb, GFP_ATOMIC);
3612 	if (status != 0)
3613 		netif_err(dev, timer, dev->net,
3614 			  "intr resubmit --> %d\n", status);
3615 }
3616 
3617 static void lan78xx_disconnect(struct usb_interface *intf)
3618 {
3619 	struct lan78xx_net *dev;
3620 	struct usb_device *udev;
3621 	struct net_device *net;
3622 	struct phy_device *phydev;
3623 
3624 	dev = usb_get_intfdata(intf);
3625 	usb_set_intfdata(intf, NULL);
3626 	if (!dev)
3627 		return;
3628 
3629 	udev = interface_to_usbdev(intf);
3630 	net = dev->net;
3631 	phydev = net->phydev;
3632 
3633 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3634 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3635 
3636 	phy_disconnect(net->phydev);
3637 
3638 	if (phy_is_pseudo_fixed_link(phydev))
3639 		fixed_phy_unregister(phydev);
3640 
3641 	unregister_netdev(net);
3642 
3643 	cancel_delayed_work_sync(&dev->wq);
3644 
3645 	usb_scuttle_anchored_urbs(&dev->deferred);
3646 
3647 	lan78xx_unbind(dev, intf);
3648 
3649 	usb_kill_urb(dev->urb_intr);
3650 	usb_free_urb(dev->urb_intr);
3651 
3652 	free_netdev(net);
3653 	usb_put_dev(udev);
3654 }
3655 
3656 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3657 {
3658 	struct lan78xx_net *dev = netdev_priv(net);
3659 
3660 	unlink_urbs(dev, &dev->txq);
3661 	tasklet_schedule(&dev->bh);
3662 }
3663 
3664 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3665 						struct net_device *netdev,
3666 						netdev_features_t features)
3667 {
3668 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3669 		features &= ~NETIF_F_GSO_MASK;
3670 
3671 	features = vlan_features_check(skb, features);
3672 	features = vxlan_features_check(skb, features);
3673 
3674 	return features;
3675 }
3676 
3677 static const struct net_device_ops lan78xx_netdev_ops = {
3678 	.ndo_open		= lan78xx_open,
3679 	.ndo_stop		= lan78xx_stop,
3680 	.ndo_start_xmit		= lan78xx_start_xmit,
3681 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3682 	.ndo_change_mtu		= lan78xx_change_mtu,
3683 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3684 	.ndo_validate_addr	= eth_validate_addr,
3685 	.ndo_do_ioctl		= phy_do_ioctl_running,
3686 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3687 	.ndo_set_features	= lan78xx_set_features,
3688 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3689 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3690 	.ndo_features_check	= lan78xx_features_check,
3691 };
3692 
3693 static void lan78xx_stat_monitor(struct timer_list *t)
3694 {
3695 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3696 
3697 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3698 }
3699 
3700 static int lan78xx_probe(struct usb_interface *intf,
3701 			 const struct usb_device_id *id)
3702 {
3703 	struct lan78xx_net *dev;
3704 	struct net_device *netdev;
3705 	struct usb_device *udev;
3706 	int ret;
3707 	unsigned maxp;
3708 	unsigned period;
3709 	u8 *buf = NULL;
3710 
3711 	udev = interface_to_usbdev(intf);
3712 	udev = usb_get_dev(udev);
3713 
3714 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3715 	if (!netdev) {
3716 		dev_err(&intf->dev, "Error: OOM\n");
3717 		ret = -ENOMEM;
3718 		goto out1;
3719 	}
3720 
3721 	/* netdev_printk() needs this */
3722 	SET_NETDEV_DEV(netdev, &intf->dev);
3723 
3724 	dev = netdev_priv(netdev);
3725 	dev->udev = udev;
3726 	dev->intf = intf;
3727 	dev->net = netdev;
3728 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3729 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3730 
3731 	skb_queue_head_init(&dev->rxq);
3732 	skb_queue_head_init(&dev->txq);
3733 	skb_queue_head_init(&dev->done);
3734 	skb_queue_head_init(&dev->rxq_pause);
3735 	skb_queue_head_init(&dev->txq_pend);
3736 	mutex_init(&dev->phy_mutex);
3737 
3738 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3739 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3740 	init_usb_anchor(&dev->deferred);
3741 
3742 	netdev->netdev_ops = &lan78xx_netdev_ops;
3743 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3744 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3745 
3746 	dev->delta = 1;
3747 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3748 
3749 	mutex_init(&dev->stats.access_lock);
3750 
3751 	ret = lan78xx_bind(dev, intf);
3752 	if (ret < 0)
3753 		goto out2;
3754 
3755 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3756 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3757 
3758 	/* MTU range: 68 - 9000 */
3759 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3760 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3761 
3762 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3763 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3764 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3765 
3766 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3767 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3768 
3769 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3770 					dev->ep_intr->desc.bEndpointAddress &
3771 					USB_ENDPOINT_NUMBER_MASK);
3772 	period = dev->ep_intr->desc.bInterval;
3773 
3774 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3775 	buf = kmalloc(maxp, GFP_KERNEL);
3776 	if (buf) {
3777 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3778 		if (!dev->urb_intr) {
3779 			ret = -ENOMEM;
3780 			kfree(buf);
3781 			goto out3;
3782 		} else {
3783 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3784 					 dev->pipe_intr, buf, maxp,
3785 					 intr_complete, dev, period);
3786 		}
3787 	}
3788 
3789 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3790 
3791 	/* driver requires remote-wakeup capability during autosuspend. */
3792 	intf->needs_remote_wakeup = 1;
3793 
3794 	ret = lan78xx_phy_init(dev);
3795 	if (ret < 0)
3796 		goto out4;
3797 
3798 	ret = register_netdev(netdev);
3799 	if (ret != 0) {
3800 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3801 		goto out5;
3802 	}
3803 
3804 	usb_set_intfdata(intf, dev);
3805 
3806 	ret = device_set_wakeup_enable(&udev->dev, true);
3807 
3808 	 /* The default autosuspend delay of 2 sec has more overhead than
3809 	  * advantage, so set it to 10 sec instead.
3810 	  */
3811 	pm_runtime_set_autosuspend_delay(&udev->dev,
3812 					 DEFAULT_AUTOSUSPEND_DELAY);
3813 
3814 	return 0;
3815 
3816 out5:
3817 	phy_disconnect(netdev->phydev);
3818 out4:
3819 	usb_free_urb(dev->urb_intr);
3820 out3:
3821 	lan78xx_unbind(dev, intf);
3822 out2:
3823 	free_netdev(netdev);
3824 out1:
3825 	usb_put_dev(udev);
3826 
3827 	return ret;
3828 }
3829 
3830 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3831 {
3832 	const u16 crc16poly = 0x8005;
3833 	int i;
3834 	u16 bit, crc, msb;
3835 	u8 data;
3836 
3837 	crc = 0xFFFF;
3838 	for (i = 0; i < len; i++) {
3839 		data = *buf++;
3840 		for (bit = 0; bit < 8; bit++) {
3841 			msb = crc >> 15;
3842 			crc <<= 1;
3843 
3844 			if (msb ^ (u16)(data & 1)) {
3845 				crc ^= crc16poly;
3846 				crc |= (u16)0x0001U;
3847 			}
3848 			data >>= 1;
3849 		}
3850 	}
3851 
3852 	return crc;
3853 }
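
/* Editor's note: the wakeup-frame filters programmed below pair this CRC
 * with a 128-bit byte mask (WUF_MASK0..3) in which bit N selects frame
 * byte N for the calculation.  A hypothetical helper shows where the
 * literal mask values used below come from:
 *
 *	static u32 wuf_mask_example(int first_byte, int nbytes)
 *	{
 *		// bits first_byte .. first_byte + nbytes - 1,
 *		// assuming the range stays within one 32-bit mask word
 *		return ((1U << nbytes) - 1) << first_byte;
 *	}
 *
 *	wuf_mask_example(0, 3);		// 0x7: IPv4 mcast MAC prefix 01:00:5E
 *	wuf_mask_example(0, 2);		// 0x3: IPv6 mcast MAC prefix 33:33
 *	wuf_mask_example(12, 2);	// 0x3000: EtherType bytes 12-13 (ARP)
 */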
3854 
3855 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3856 {
3857 	u32 buf;
3858 	int ret;
3859 	int mask_index;
3860 	u16 crc;
3861 	u32 temp_wucsr;
3862 	u32 temp_pmt_ctl;
3863 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3864 	const u8 ipv6_multicast[2] = { 0x33, 0x33 };
3865 	const u8 arp_type[2] = { 0x08, 0x06 };
3866 
3867 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3868 	buf &= ~MAC_TX_TXEN_;
3869 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3870 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3871 	buf &= ~MAC_RX_RXEN_;
3872 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3873 
3874 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3875 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3876 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3877 
3878 	temp_wucsr = 0;
3879 
3880 	temp_pmt_ctl = 0;
3881 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3882 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3883 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3884 
3885 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3886 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3887 
3888 	mask_index = 0;
3889 	if (wol & WAKE_PHY) {
3890 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3891 
3892 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3893 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3894 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3895 	}
3896 	if (wol & WAKE_MAGIC) {
3897 		temp_wucsr |= WUCSR_MPEN_;
3898 
3899 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3900 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3901 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3902 	}
3903 	if (wol & WAKE_BCAST) {
3904 		temp_wucsr |= WUCSR_BCST_EN_;
3905 
3906 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3907 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3908 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3909 	}
3910 	if (wol & WAKE_MCAST) {
3911 		temp_wucsr |= WUCSR_WAKE_EN_;
3912 
3913 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3914 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3915 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3916 					WUF_CFGX_EN_ |
3917 					WUF_CFGX_TYPE_MCAST_ |
3918 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3919 					(crc & WUF_CFGX_CRC16_MASK_));
3920 
3921 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3922 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3923 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3924 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3925 		mask_index++;
3926 
3927 		/* for IPv6 Multicast */
3928 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3929 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3930 					WUF_CFGX_EN_ |
3931 					WUF_CFGX_TYPE_MCAST_ |
3932 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3933 					(crc & WUF_CFGX_CRC16_MASK_));
3934 
3935 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3936 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3937 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3938 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3939 		mask_index++;
3940 
3941 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3942 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3943 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3944 	}
3945 	if (wol & WAKE_UCAST) {
3946 		temp_wucsr |= WUCSR_PFDA_EN_;
3947 
3948 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3949 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3950 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3951 	}
3952 	if (wol & WAKE_ARP) {
3953 		temp_wucsr |= WUCSR_WAKE_EN_;
3954 
3955 		/* set WUF_CFG & WUF_MASK
3956 		 * for EtherType (offset 12,13) = ARP (0x0806)
3957 		 */
3958 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3959 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3960 					WUF_CFGX_EN_ |
3961 					WUF_CFGX_TYPE_ALL_ |
3962 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3963 					(crc & WUF_CFGX_CRC16_MASK_));
3964 
3965 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3966 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3967 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3968 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3969 		mask_index++;
3970 
3971 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3972 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3973 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3974 	}
3975 
3976 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3977 
3978 	/* when multiple WOL bits are set, fall back to suspend mode 0 */
3979 	if (hweight_long((unsigned long)wol) > 1) {
3980 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3981 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3982 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3983 	}
3984 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3985 
3986 	/* clear WUPS */
3987 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3988 	buf |= PMT_CTL_WUPS_MASK_;
3989 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3990 
3991 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3992 	buf |= MAC_RX_RXEN_;
3993 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3994 
3995 	return 0;
3996 }
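
/* Editor's sketch: the WAKE_* bits handled above are chosen by userspace.
 * A minimal example of arming magic-packet and PHY wake via the
 * SIOCETHTOOL ioctl (the interface name "eth0" is an assumption; the
 * usual shorthand is "ethtool -s eth0 wol gp"):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ethtool_wolinfo wol = {
 *			.cmd = ETHTOOL_SWOL,
 *			.wolopts = WAKE_MAGIC | WAKE_PHY,
 *		};
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&wol;
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */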
3997 
3998 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3999 {
4000 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4001 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
4002 	u32 buf;
4003 	int ret;
4004 
4005 	if (!dev->suspend_count++) {
4006 		spin_lock_irq(&dev->txq.lock);
4007 		/* don't autosuspend while transmitting */
4008 		if ((skb_queue_len(&dev->txq) ||
4009 		     skb_queue_len(&dev->txq_pend)) &&
4010 		    PMSG_IS_AUTO(message)) {
4011 			spin_unlock_irq(&dev->txq.lock);
4012 			ret = -EBUSY;
4013 			goto out;
4014 		}
4015 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4016 		spin_unlock_irq(&dev->txq.lock);
4018 
4019 		/* stop TX & RX */
4020 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4021 		buf &= ~MAC_TX_TXEN_;
4022 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
4023 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4024 		buf &= ~MAC_RX_RXEN_;
4025 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
4026 
4027 		/* empty out the RX and TX queues */
4028 		netif_device_detach(dev->net);
4029 		lan78xx_terminate_urbs(dev);
4030 		usb_kill_urb(dev->urb_intr);
4031 
4032 		/* reattach */
4033 		netif_device_attach(dev->net);
4034 	}
4035 
4036 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4037 		del_timer(&dev->stat_monitor);
4038 
4039 		if (PMSG_IS_AUTO(message)) {
4040 			/* auto suspend (selective suspend) */
4041 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4042 			buf &= ~MAC_TX_TXEN_;
4043 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
4044 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4045 			buf &= ~MAC_RX_RXEN_;
4046 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4047 
4048 			ret = lan78xx_write_reg(dev, WUCSR, 0);
4049 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
4050 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4051 
4052 			/* wake on good frames that pass the RFE filters */
4053 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
4054 
4055 			buf |= WUCSR_RFE_WAKE_EN_;
4056 			buf |= WUCSR_STORE_WAKE_;
4057 
4058 			ret = lan78xx_write_reg(dev, WUCSR, buf);
4059 
4060 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4061 
4062 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4063 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
4064 
4065 			buf |= PMT_CTL_PHY_WAKE_EN_;
4066 			buf |= PMT_CTL_WOL_EN_;
4067 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
4068 			buf |= PMT_CTL_SUS_MODE_3_;
4069 
4070 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4071 
4072 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4073 
4074 			buf |= PMT_CTL_WUPS_MASK_;
4075 
4076 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4077 
4078 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4079 			buf |= MAC_RX_RXEN_;
4080 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
4081 		} else {
4082 			lan78xx_set_suspend(dev, pdata->wol);
4083 		}
4084 	}
4085 
4086 	ret = 0;
4087 out:
4088 	return ret;
4089 }
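
/* Editor's note: the PMSG_IS_AUTO() branch above runs only once every
 * runtime-PM reference on the interface has been dropped.  A driver I/O
 * path typically brackets hardware access like this (do_io_example is
 * hypothetical):
 *
 *	static int do_io_example(struct lan78xx_net *dev)
 *	{
 *		// resumes the device first if it was autosuspended
 *		int ret = usb_autopm_get_interface(dev->intf);
 *
 *		if (ret < 0)
 *			return ret;
 *		// ... read/write registers, submit URBs ...
 *		usb_autopm_put_interface(dev->intf);	// may re-arm autosuspend
 *		return 0;
 *	}
 */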
4090 
4091 static int lan78xx_resume(struct usb_interface *intf)
4092 {
4093 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4094 	struct sk_buff *skb;
4095 	struct urb *res;
4096 	int ret;
4097 	u32 buf;
4098 
4099 	if (!timer_pending(&dev->stat_monitor)) {
4100 		dev->delta = 1;
4101 		mod_timer(&dev->stat_monitor,
4102 			  jiffies + STAT_UPDATE_TIMER);
4103 	}
4104 
4105 	if (!--dev->suspend_count) {
4106 		/* resume the interrupt URB */
4107 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4108 			usb_submit_urb(dev->urb_intr, GFP_NOIO);
4109 
4110 		spin_lock_irq(&dev->txq.lock);
4111 		while ((res = usb_get_from_anchor(&dev->deferred))) {
4112 			skb = (struct sk_buff *)res->context;
4113 			ret = usb_submit_urb(res, GFP_ATOMIC);
4114 			if (ret < 0) {
4115 				dev_kfree_skb_any(skb);
4116 				usb_free_urb(res);
4117 				usb_autopm_put_interface_async(dev->intf);
4118 			} else {
4119 				netif_trans_update(dev->net);
4120 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
4121 			}
4122 		}
4123 
4124 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4125 		spin_unlock_irq(&dev->txq.lock);
4126 
4127 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4128 			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4129 				netif_start_queue(dev->net);
4130 			tasklet_schedule(&dev->bh);
4131 		}
4132 	}
4133 
4134 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4135 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4136 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4137 
4138 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4139 					     WUCSR2_ARP_RCD_ |
4140 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4141 					     WUCSR2_IPV4_TCPSYN_RCD_);
4142 
4143 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4144 					    WUCSR_EEE_RX_WAKE_ |
4145 					    WUCSR_PFDA_FR_ |
4146 					    WUCSR_RFE_WAKE_FR_ |
4147 					    WUCSR_WUFR_ |
4148 					    WUCSR_MPR_ |
4149 					    WUCSR_BCST_FR_);
4150 
4151 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4152 	buf |= MAC_TX_TXEN_;
4153 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4154 
4155 	return 0;
4156 }
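
/* Editor's sketch: the deferred-URB drain above is the resume half of a
 * park-and-resubmit pattern.  While EVENT_DEV_ASLEEP is set, the transmit
 * path anchors URBs on dev->deferred instead of submitting them; condensed
 * (defer_or_submit_example is hypothetical):
 *
 *	static int defer_or_submit_example(struct lan78xx_net *dev,
 *					   struct urb *urb)
 *	{
 *		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
 *			usb_anchor_urb(urb, &dev->deferred);	// park
 *			return 0;	// resubmitted by lan78xx_resume()
 *		}
 *		return usb_submit_urb(urb, GFP_ATOMIC);
 *	}
 */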
4157 
4158 static int lan78xx_reset_resume(struct usb_interface *intf)
4159 {
4160 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4161 
4162 	lan78xx_reset(dev);
4163 
4164 	phy_start(dev->net->phydev);
4165 
4166 	return lan78xx_resume(intf);
4167 }
4168 
4169 static const struct usb_device_id products[] = {
4170 	{
4171 	/* LAN7800 USB Gigabit Ethernet Device */
4172 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4173 	},
4174 	{
4175 	/* LAN7850 USB Gigabit Ethernet Device */
4176 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4177 	},
4178 	{
4179 	/* LAN7801 USB Gigabit Ethernet Device */
4180 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4181 	},
4182 	{},
4183 };
4184 MODULE_DEVICE_TABLE(usb, products);
4185 
4186 static struct usb_driver lan78xx_driver = {
4187 	.name			= DRIVER_NAME,
4188 	.id_table		= products,
4189 	.probe			= lan78xx_probe,
4190 	.disconnect		= lan78xx_disconnect,
4191 	.suspend		= lan78xx_suspend,
4192 	.resume			= lan78xx_resume,
4193 	.reset_resume		= lan78xx_reset_resume,
4194 	.supports_autosuspend	= 1,
4195 	.disable_hub_initiated_lpm = 1,
4196 };
4197 
4198 module_usb_driver(lan78xx_driver);
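
/* Editor's note: module_usb_driver() replaces the init/exit boilerplate;
 * it expands to the equivalent of:
 *
 *	static int __init lan78xx_driver_init(void)
 *	{
 *		return usb_register(&lan78xx_driver);
 *	}
 *	module_init(lan78xx_driver_init);
 *
 *	static void __exit lan78xx_driver_exit(void)
 *	{
 *		usb_deregister(&lan78xx_driver);
 *	}
 *	module_exit(lan78xx_driver_exit);
 */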
4199 
4200 MODULE_AUTHOR(DRIVER_AUTHOR);
4201 MODULE_DESCRIPTION(DRIVER_DESC);
4202 MODULE_LICENSE("GPL");
4203