xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision 110e6f26)
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35 
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.4"

/* queue/timing parameters for the URB machinery */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* bulk endpoint max packet size per USB speed */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

/* all wake-on-lan trigger types the hardware supports */
#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)
91 
/* ethtool statistics names.
 * Entry order MUST match the member order of struct lan78xx_statstage /
 * struct lan78xx_statstage64: the stats are copied out positionally.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
141 
/* Raw 32-bit hardware counters, in the exact layout returned by the
 * USB_VENDOR_REQUEST_GET_STATS control request (lan78xx_read_stats()
 * byte-swaps it word by word, so the layout is wire format — do not
 * reorder).  Member order must match lan78xx_gstrings[] and
 * struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
191 
/* 64-bit accumulated counters (hardware value extended by the rollover
 * count, see lan78xx_update_stats()).  Member order must stay identical
 * to struct lan78xx_statstage: the update loop walks both structs as
 * flat arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
241 
242 struct lan78xx_net;
243 
/* Per-device private state hung off dev->data[0]; holds the shadow copy
 * of the receive-filter configuration that the deferred work items push
 * to the hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred filter programming */
	struct work_struct set_vlan;
	u32 wol;			/* wake-on-lan option flags */
};
256 
/* Lifecycle of an skb while it travels through the URB queues
 * (stored in skb_data.state; see struct skb_data below).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
266 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb's data */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where in the tx/rx pipeline it is */
	size_t length;		/* payload length for accounting */
};
273 
/* context handed to asynchronous USB control transfers */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
278 
/* bit numbers for dev->flags (NOTE(review): presumably manipulated with
 * set_bit/test_bit by code outside this chunk — confirm against the
 * rest of the file)
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
289 
/* Aggregated statistics state: last hardware snapshot, per-counter
 * rollover counts and maxima, and the accumulated 64-bit totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw snapshot */
	struct lan78xx_statstage	rollover_count; /* wraps per counter */
	struct lan78xx_statstage	rollover_max;	/* max value per counter */
	struct lan78xx_statstage64	curr_stat;	/* extended 64-bit totals */
};
297 
/* Main per-adapter state, stored as netdev_priv() of the net_device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv */

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;		/* rx URBs in flight */
	struct sk_buff_head	txq;		/* tx URBs in flight */
	struct sk_buff_head	done;		/* completed, awaiting bh */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;	/* tx skbs not yet submitted */

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif_msg verbosity mask */

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];	/* data[0]: lan78xx_priv */

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* from ID_REV register */
	u32			chiprev;
	struct mii_bus		*mdiobus;

	int			fc_autoneg;	/* flow ctrl from autoneg? */
	u8			fc_request_control; /* manual FLOW_CTRL_* */

	int			delta;
	struct statstage	stats;
};
354 
/* use ethtool to change the level for any given device;
 * -1 means "use the driver default" (netif_msg_init semantics)
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
359 
/* Read a 32-bit device register over the default control pipe.
 *
 * The 4-byte bounce buffer is kmalloc'd rather than on the stack because
 * usb_control_msg() requires a DMA-able transfer buffer.
 * Returns the usb_control_msg() result (>= 0) on success or a negative
 * errno; *data is only written on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT),
	if (likely(ret >= 0)) {
		/* registers are little-endian on the wire */
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
385 
386 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
387 {
388 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
389 	int ret;
390 
391 	if (!buf)
392 		return -ENOMEM;
393 
394 	*buf = data;
395 	cpu_to_le32s(buf);
396 
397 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
398 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
399 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
400 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
401 	if (unlikely(ret < 0)) {
402 		netdev_warn(dev->net,
403 			    "Failed to write register index 0x%08x. ret = %d",
404 			    index, ret);
405 	}
406 
407 	kfree(buf);
408 
409 	return ret;
410 }
411 
/* Fetch the whole hardware statistics block with one vendor control
 * request and byte-swap it word by word into *data.
 *
 * A kmalloc'd bounce buffer is used because usb_control_msg() requires a
 * DMA-able buffer.  Returns the usb_control_msg() result (bytes
 * transferred on success) or a negative errno; *data is untouched on
 * failure.
 *
 * NOTE(review): USB_CTRL_SET_TIMEOUT is passed although this is an IN
 * transfer; both timeout constants have the same value so it is
 * harmless, but USB_CTRL_GET_TIMEOUT would be the matching one — confirm
 * before changing.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* counters arrive little-endian; fix up each u32 */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
450 
/* If the freshly read counter is below the last snapshot, the 32-bit
 * hardware counter wrapped: bump the per-member rollover count.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside an unbraced if/else (the old bare-block form
 * expanded to "{ ... };" which breaks there); arguments parenthesized
 * against operator-precedence surprises.
 */
#define check_counter_rollover(struct1, dev_stats, member)	\
	do {							\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;	\
	} while (0)
455 
/* Compare the freshly read hardware counters against the previous
 * snapshot, count 32-bit wrap-arounds per member, then store the new
 * snapshot as the baseline for the next pass.
 * Called with dev->stats.access_lock held (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
509 
/* Refresh dev->stats.curr_stat: read the hardware counters, record any
 * 32-bit rollovers, then extend every counter to 64 bits as
 *   current + rollovers * (rollover_max + 1).
 * Walks the stat structs as flat u32/u64 arrays, which relies on
 * lan78xx_statstage and lan78xx_statstage64 having identical member
 * order.  Silently does nothing if the device cannot be runtime-resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* > 0: only fold in a snapshot that actually transferred bytes */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
537 
538 /* Loop until the read is completed with timeout called with phy_mutex held */
539 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
540 {
541 	unsigned long start_time = jiffies;
542 	u32 val;
543 	int ret;
544 
545 	do {
546 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
547 		if (unlikely(ret < 0))
548 			return -EIO;
549 
550 		if (!(val & MII_ACC_MII_BUSY_))
551 			return 0;
552 	} while (!time_after(jiffies, start_time + HZ));
553 
554 	return -EIO;
555 }
556 
557 static inline u32 mii_access(int id, int index, int read)
558 {
559 	u32 ret;
560 
561 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
562 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
563 	if (read)
564 		ret |= MII_ACC_MII_READ_;
565 	else
566 		ret |= MII_ACC_MII_WRITE_;
567 	ret |= MII_ACC_MII_BUSY_;
568 
569 	return ret;
570 }
571 
/* Poll E2P_CMD until the pending EEPROM command finishes, for at most
 * one second.  Returns 0 on clean completion, -EIO if a register read
 * fails, if the command is still busy after the timeout, or if the
 * controller itself flagged E2P_CMD_EPC_TIMEOUT_.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		/* done, or the controller gave up on the command */
		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
596 
597 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
598 {
599 	unsigned long start_time = jiffies;
600 	u32 val;
601 	int ret;
602 
603 	do {
604 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
605 		if (unlikely(ret < 0))
606 			return -EIO;
607 
608 		if (!(val & E2P_CMD_EPC_BUSY_))
609 			return 0;
610 
611 		usleep_range(40, 100);
612 	} while (!time_after(jiffies, start_time + HZ));
613 
614 	netdev_warn(dev->net, "EEPROM is busy");
615 	return -EIO;
616 }
617 
618 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
619 				   u32 length, u8 *data)
620 {
621 	u32 val;
622 	u32 saved;
623 	int i, ret;
624 	int retval;
625 
626 	/* depends on chip, some EEPROM pins are muxed with LED function.
627 	 * disable & restore LED function to access EEPROM.
628 	 */
629 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
630 	saved = val;
631 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
632 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
633 		ret = lan78xx_write_reg(dev, HW_CFG, val);
634 	}
635 
636 	retval = lan78xx_eeprom_confirm_not_busy(dev);
637 	if (retval)
638 		return retval;
639 
640 	for (i = 0; i < length; i++) {
641 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
642 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
643 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
644 		if (unlikely(ret < 0)) {
645 			retval = -EIO;
646 			goto exit;
647 		}
648 
649 		retval = lan78xx_wait_eeprom(dev);
650 		if (retval < 0)
651 			goto exit;
652 
653 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
654 		if (unlikely(ret < 0)) {
655 			retval = -EIO;
656 			goto exit;
657 		}
658 
659 		data[i] = val & 0xFF;
660 		offset++;
661 	}
662 
663 	retval = 0;
664 exit:
665 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
666 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
667 
668 	return retval;
669 }
670 
671 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
672 			       u32 length, u8 *data)
673 {
674 	u8 sig;
675 	int ret;
676 
677 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
678 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
679 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
680 	else
681 		ret = -EINVAL;
682 
683 	return ret;
684 }
685 
/* Write 'length' bytes to the raw EEPROM starting at 'offset'.
 * Issues a single write-enable (EWEN) command, then one WRITE command
 * per byte, waiting for completion after each.  On LAN7800 the LED pins
 * muxed with the EEPROM are disabled for the duration and restored on
 * every exit path.  Returns 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the saved LED configuration */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
752 
/* Read 'length' bytes from raw OTP memory starting at 'offset'.
 * If OTP_PWR_DN still reports the power-down bit, clear it and wait (up
 * to 1s) for the bit to drop.  Each byte is fetched with an
 * ADDR1/ADDR2 + READ + GO command sequence, polling OTP_STATUS until the
 * busy bit clears (1s timeout per byte).
 * Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* split the byte address across the two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
806 
/* Program 'length' bytes into raw OTP memory starting at 'offset'.
 * Same power-up dance as lan78xx_read_raw_otp(), then the block is put
 * into BYTE program mode and each byte is written with an
 * ADDR1/ADDR2 + PRGM_DATA + PRGVRFY + GO sequence, polling OTP_STATUS
 * for completion.  OTP cells are one-time programmable — callers are
 * expected to have validated the data first.
 * Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
859 
860 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
861 			    u32 length, u8 *data)
862 {
863 	u8 sig;
864 	int ret;
865 
866 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
867 
868 	if (ret == 0) {
869 		if (sig == OTP_INDICATOR_1)
870 			offset = offset;
871 		else if (sig == OTP_INDICATOR_2)
872 			offset += 0x100;
873 		else
874 			ret = -EINVAL;
875 		ret = lan78xx_read_raw_otp(dev, offset, length, data);
876 	}
877 
878 	return ret;
879 }
880 
881 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
882 {
883 	int i, ret;
884 
885 	for (i = 0; i < 100; i++) {
886 		u32 dp_sel;
887 
888 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
889 		if (unlikely(ret < 0))
890 			return -EIO;
891 
892 		if (dp_sel & DP_SEL_DPRDY_)
893 			return 0;
894 
895 		usleep_range(40, 100);
896 	}
897 
898 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
899 
900 	return -EIO;
901 }
902 
903 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
904 				  u32 addr, u32 length, u32 *buf)
905 {
906 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
907 	u32 dp_sel;
908 	int i, ret;
909 
910 	if (usb_autopm_get_interface(dev->intf) < 0)
911 			return 0;
912 
913 	mutex_lock(&pdata->dataport_mutex);
914 
915 	ret = lan78xx_dataport_wait_not_busy(dev);
916 	if (ret < 0)
917 		goto done;
918 
919 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
920 
921 	dp_sel &= ~DP_SEL_RSEL_MASK_;
922 	dp_sel |= ram_select;
923 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
924 
925 	for (i = 0; i < length; i++) {
926 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
927 
928 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
929 
930 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
931 
932 		ret = lan78xx_dataport_wait_not_busy(dev);
933 		if (ret < 0)
934 			goto done;
935 	}
936 
937 done:
938 	mutex_unlock(&pdata->dataport_mutex);
939 	usb_autopm_put_interface(dev->intf);
940 
941 	return ret;
942 }
943 
944 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
945 				    int index, u8 addr[ETH_ALEN])
946 {
947 	u32	temp;
948 
949 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
950 		temp = addr[3];
951 		temp = addr[2] | (temp << 8);
952 		temp = addr[1] | (temp << 8);
953 		temp = addr[0] | (temp << 8);
954 		pdata->pfilter_table[index][1] = temp;
955 		temp = addr[5];
956 		temp = addr[4] | (temp << 8);
957 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
958 		pdata->pfilter_table[index][0] = temp;
959 	}
960 }
961 
/* returns hash bit number for given MAC address:
 * bits 31:23 of the Ethernet CRC, i.e. a 9-bit index used as
 * (word = bitnum / 32, bit = bitnum % 32) into the multicast hash table
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
967 
/* Work item: push the receive-filter state staged by
 * lan78xx_set_multicast() to the hardware.  Runs in process context
 * because the dataport/register writes sleep.
 * Each MAF_HI(i) is cleared before the MAF_LO/MAF_HI pair is rewritten,
 * so a perfect filter is never active with a half-updated address
 * (the valid bit lives in the HI word, written last).
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* multicast hash table lives in the VLAN/DA RAM bank, after the
	 * VLAN table
	 */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
992 
/* ndo_set_rx_mode handler.  May be called in atomic context, so it only
 * rebuilds the shadow filter state under rfe_ctl_lock here and defers
 * the sleeping register/dataport writes to the set_multicast work item.
 * The first 32 multicast addresses go into perfect-filter slots (slot 0
 * is reserved for the device's own address); the rest fall back to the
 * 512-bit hash filter.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: no unicast/multicast pass-through,
	 * no perfect or hash matching
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1055 
/* Program pause-frame (flow control) settings for the freshly resolved
 * link.  The TX/RX pause capabilities come either from autonegotiation
 * (dev->fc_autoneg) or from the manually requested ethtool settings.
 * Always returns 0; register write failures are not propagated.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* low 16 bits of FLOW are the pause time advertised in TX pause
	 * frames; 0xFFFF requests the maximum
	 */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO on/off thresholds; presumably vendor-tuned per USB speed —
	 * the constants are not derivable from this file
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1090 
/* Handle a deferred link-change event (scheduled from lan78xx_status()).
 *
 * Clears the PHY and chip interrupt status, re-reads the PHY link state
 * and, on a transition, either resets the MAC (link down) or reconfigures
 * USB power states and flow control (link up), notifying phylib either way.
 *
 * Returns 0 or a negative errno on register/PHY access failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down: quiesce the MAC and stop statistics polling */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link came up: pick USB LPM states, resolve flow control */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* read clears any latched PHY interrupt; result unused */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
1175 
/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
/* Record a pending event in dev->flags and kick the deferred-work queue.
 *
 * @work: EVENT_* bit number identifying the work to perform.
 *
 * If the work item is already queued, schedule_delayed_work() returns
 * false and the event may be coalesced with (or lost behind) the pending
 * run; we only log in that case.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1187 
1188 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1189 {
1190 	u32 intdata;
1191 
1192 	if (urb->actual_length != 4) {
1193 		netdev_warn(dev->net,
1194 			    "unexpected urb length %d", urb->actual_length);
1195 		return;
1196 	}
1197 
1198 	memcpy(&intdata, urb->transfer_buffer, 4);
1199 	le32_to_cpus(&intdata);
1200 
1201 	if (intdata & INT_ENP_PHY_INT) {
1202 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1203 			  lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1204 	} else
1205 		netdev_warn(dev->net,
1206 			    "unexpected interrupt: 0x%08x\n", intdata);
1207 }
1208 
/* ethtool .get_eeprom_len: report the fixed EEPROM size exposed to userland. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1213 
/* ethtool .get_eeprom: read @ee->len bytes at @ee->offset into @data.
 * Returns 0 on success or a negative errno from the raw EEPROM reader.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* magic lets userland distinguish EEPROM from OTP accesses */
	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1223 
1224 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1225 				      struct ethtool_eeprom *ee, u8 *data)
1226 {
1227 	struct lan78xx_net *dev = netdev_priv(netdev);
1228 
1229 	/* Allow entire eeprom update only */
1230 	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1231 	    (ee->offset == 0) &&
1232 	    (ee->len == 512) &&
1233 	    (data[0] == EEPROM_INDICATOR))
1234 		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1235 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1236 		 (ee->offset == 0) &&
1237 		 (ee->len == 512) &&
1238 		 (data[0] == OTP_INDICATOR_1))
1239 		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1240 
1241 	return -EINVAL;
1242 }
1243 
/* ethtool .get_strings: copy out the statistics name table for ETH_SS_STATS. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1250 
1251 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1252 {
1253 	if (sset == ETH_SS_STATS)
1254 		return ARRAY_SIZE(lan78xx_gstrings);
1255 	else
1256 		return -EOPNOTSUPP;
1257 }
1258 
/* ethtool .get_ethtool_stats: refresh hardware counters and copy the
 * cached snapshot to userland under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	/* lock guards curr_stat against concurrent updates */
	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1270 
1271 static void lan78xx_get_wol(struct net_device *netdev,
1272 			    struct ethtool_wolinfo *wol)
1273 {
1274 	struct lan78xx_net *dev = netdev_priv(netdev);
1275 	int ret;
1276 	u32 buf;
1277 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1278 
1279 	if (usb_autopm_get_interface(dev->intf) < 0)
1280 			return;
1281 
1282 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1283 	if (unlikely(ret < 0)) {
1284 		wol->supported = 0;
1285 		wol->wolopts = 0;
1286 	} else {
1287 		if (buf & USB_CFG_RMT_WKP_) {
1288 			wol->supported = WAKE_ALL;
1289 			wol->wolopts = pdata->wol;
1290 		} else {
1291 			wol->supported = 0;
1292 			wol->wolopts = 0;
1293 		}
1294 	}
1295 
1296 	usb_autopm_put_interface(dev->intf);
1297 }
1298 
1299 static int lan78xx_set_wol(struct net_device *netdev,
1300 			   struct ethtool_wolinfo *wol)
1301 {
1302 	struct lan78xx_net *dev = netdev_priv(netdev);
1303 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1304 	int ret;
1305 
1306 	ret = usb_autopm_get_interface(dev->intf);
1307 	if (ret < 0)
1308 		return ret;
1309 
1310 	pdata->wol = 0;
1311 	if (wol->wolopts & WAKE_UCAST)
1312 		pdata->wol |= WAKE_UCAST;
1313 	if (wol->wolopts & WAKE_MCAST)
1314 		pdata->wol |= WAKE_MCAST;
1315 	if (wol->wolopts & WAKE_BCAST)
1316 		pdata->wol |= WAKE_BCAST;
1317 	if (wol->wolopts & WAKE_MAGIC)
1318 		pdata->wol |= WAKE_MAGIC;
1319 	if (wol->wolopts & WAKE_PHY)
1320 		pdata->wol |= WAKE_PHY;
1321 	if (wol->wolopts & WAKE_ARP)
1322 		pdata->wol |= WAKE_ARP;
1323 
1324 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1325 
1326 	phy_ethtool_set_wol(netdev->phydev, wol);
1327 
1328 	usb_autopm_put_interface(dev->intf);
1329 
1330 	return ret;
1331 }
1332 
/* ethtool .get_eee: report Energy-Efficient-Ethernet state.
 *
 * PHY advertisement data comes from phylib; enable/active/timer state is
 * derived from the MAC_CR and EEE_TX_LPI_REQ_DLY registers.
 * Returns 0 or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both ends advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1370 
/* ethtool .set_eee: enable or disable Energy-Efficient Ethernet.
 *
 * On enable, MAC_CR_EEE_EN_ is set, the PHY advertisement is updated and
 * the LPI request delay is programmed from tx_lpi_timer (microseconds).
 * On disable, only the MAC enable bit is cleared.
 * Returns 0, or a negative errno if the interface could not be resumed;
 * register-write errors are ignored (driver-wide convention).
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1400 
/* ethtool .get_link: refresh and return the PHY's link state (1 = up). */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1407 
/* ethtool .nway_reset: restart PHY autonegotiation. */
int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}
1412 
1413 static void lan78xx_get_drvinfo(struct net_device *net,
1414 				struct ethtool_drvinfo *info)
1415 {
1416 	struct lan78xx_net *dev = netdev_priv(net);
1417 
1418 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1419 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1420 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1421 }
1422 
/* ethtool .get_msglevel: return the driver's debug-message bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1429 
/* ethtool .set_msglevel: set the driver's debug-message bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1436 
/* Read the PHY's MDIX mode control register.
 *
 * The register lives in extended page space 1; the page is restored to 0
 * before returning. Returns the raw register value (or a phy_read error).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1448 
1449 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1450 {
1451 	struct lan78xx_net *dev = netdev_priv(net);
1452 	struct phy_device *phydev = net->phydev;
1453 	int buf;
1454 
1455 	if (mdix_ctrl == ETH_TP_MDI) {
1456 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1457 			  LAN88XX_EXT_PAGE_SPACE_1);
1458 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1459 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1460 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1461 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1462 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1463 			  LAN88XX_EXT_PAGE_SPACE_0);
1464 	} else if (mdix_ctrl == ETH_TP_MDI_X) {
1465 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1466 			  LAN88XX_EXT_PAGE_SPACE_1);
1467 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1468 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1469 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1470 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1471 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1472 			  LAN88XX_EXT_PAGE_SPACE_0);
1473 	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1474 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1475 			  LAN88XX_EXT_PAGE_SPACE_1);
1476 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1477 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1478 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1479 			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1480 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1481 			  LAN88XX_EXT_PAGE_SPACE_0);
1482 	}
1483 	dev->mdix_ctrl = mdix_ctrl;
1484 }
1485 
/* ethtool .get_settings: delegate link parameters to phylib and augment
 * the result with the PHY's current MDI-X configuration.
 * Returns phy_ethtool_gset()'s result, or a negative errno if the
 * interface could not be resumed.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	/* raw extended-mode register; isolate the MDIX field below */
	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1517 
/* ethtool .set_settings: apply MDI-X control, then speed/duplex via phylib.
 *
 * For forced (non-autoneg) modes the PHY link is bounced by briefly
 * setting BMCR_LOOPBACK so the link partner renegotiates against the
 * new forced parameters.
 * Returns phy_ethtool_sset()'s result, or a negative errno if the
 * interface could not be resumed.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1548 
/* ethtool .get_pauseparam: report the requested flow-control settings.
 *
 * The values come from the driver's cached request state, not from the
 * resolved hardware configuration; the ecmd read only refreshes PHY
 * state and its result is not otherwise used here.
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	phy_ethtool_gset(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	/* ethtool core zeroes *pause; only set the bits that apply */
	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
1566 
1567 static int lan78xx_set_pause(struct net_device *net,
1568 			     struct ethtool_pauseparam *pause)
1569 {
1570 	struct lan78xx_net *dev = netdev_priv(net);
1571 	struct phy_device *phydev = net->phydev;
1572 	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1573 	int ret;
1574 
1575 	phy_ethtool_gset(phydev, &ecmd);
1576 
1577 	if (pause->autoneg && !ecmd.autoneg) {
1578 		ret = -EINVAL;
1579 		goto exit;
1580 	}
1581 
1582 	dev->fc_request_control = 0;
1583 	if (pause->rx_pause)
1584 		dev->fc_request_control |= FLOW_CTRL_RX;
1585 
1586 	if (pause->tx_pause)
1587 		dev->fc_request_control |= FLOW_CTRL_TX;
1588 
1589 	if (ecmd.autoneg) {
1590 		u32 mii_adv;
1591 
1592 		ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1593 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1594 		ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1595 		phy_ethtool_sset(phydev, &ecmd);
1596 	}
1597 
1598 	dev->fc_autoneg = pause->autoneg;
1599 
1600 	ret = 0;
1601 exit:
1602 	return ret;
1603 }
1604 
/* ethtool operation table; link management is delegated to phylib via
 * the legacy get/set_settings (struct ethtool_cmd) interface.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
};
1626 
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while up. */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1634 
1635 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1636 {
1637 	u32 addr_lo, addr_hi;
1638 	int ret;
1639 	u8 addr[6];
1640 
1641 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1642 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1643 
1644 	addr[0] = addr_lo & 0xFF;
1645 	addr[1] = (addr_lo >> 8) & 0xFF;
1646 	addr[2] = (addr_lo >> 16) & 0xFF;
1647 	addr[3] = (addr_lo >> 24) & 0xFF;
1648 	addr[4] = addr_hi & 0xFF;
1649 	addr[5] = (addr_hi >> 8) & 0xFF;
1650 
1651 	if (!is_valid_ether_addr(addr)) {
1652 		/* reading mac address from EEPROM or OTP */
1653 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1654 					 addr) == 0) ||
1655 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1656 				      addr) == 0)) {
1657 			if (is_valid_ether_addr(addr)) {
1658 				/* eeprom values are valid so use them */
1659 				netif_dbg(dev, ifup, dev->net,
1660 					  "MAC address read from EEPROM");
1661 			} else {
1662 				/* generate random MAC */
1663 				random_ether_addr(addr);
1664 				netif_dbg(dev, ifup, dev->net,
1665 					  "MAC address set to random addr");
1666 			}
1667 
1668 			addr_lo = addr[0] | (addr[1] << 8) |
1669 				  (addr[2] << 16) | (addr[3] << 24);
1670 			addr_hi = addr[4] | (addr[5] << 8);
1671 
1672 			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1673 			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1674 		} else {
1675 			/* generate random MAC */
1676 			random_ether_addr(addr);
1677 			netif_dbg(dev, ifup, dev->net,
1678 				  "MAC address set to random addr");
1679 		}
1680 	}
1681 
1682 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1683 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1684 
1685 	ether_addr_copy(dev->net->dev_addr, addr);
1686 }
1687 
1688 /* MDIO read and write wrappers for phylib */
/* MDIO read wrapper for phylib.
 *
 * Resumes the USB interface, serializes MII access via phy_mutex, and
 * drives a read cycle through the MII_ACC/MII_DATA registers.
 * Returns the 16-bit register value, or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the read cycle to complete */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1723 
1724 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1725 				 u16 regval)
1726 {
1727 	struct lan78xx_net *dev = bus->priv;
1728 	u32 val, addr;
1729 	int ret;
1730 
1731 	ret = usb_autopm_get_interface(dev->intf);
1732 	if (ret < 0)
1733 		return ret;
1734 
1735 	mutex_lock(&dev->phy_mutex);
1736 
1737 	/* confirm MII not busy */
1738 	ret = lan78xx_phy_wait_not_busy(dev);
1739 	if (ret < 0)
1740 		goto done;
1741 
1742 	val = (u32)regval;
1743 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1744 
1745 	/* set the address, index & direction (write to PHY) */
1746 	addr = mii_access(phy_id, idx, MII_WRITE);
1747 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1748 
1749 	ret = lan78xx_phy_wait_not_busy(dev);
1750 	if (ret < 0)
1751 		goto done;
1752 
1753 done:
1754 	mutex_unlock(&dev->phy_mutex);
1755 	usb_autopm_put_interface(dev->intf);
1756 	return 0;
1757 }
1758 
/* Allocate and register the MDIO bus used to reach the internal PHY.
 * Returns 0 on success or a negative errno; the bus is freed on failure.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1797 
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1803 
/* phylib link-change callback; link handling is done in lan78xx_link_reset()
 * via the interrupt path, so nothing is needed here.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1808 
1809 static int lan78xx_phy_init(struct lan78xx_net *dev)
1810 {
1811 	int ret;
1812 	u32 mii_adv;
1813 	struct phy_device *phydev = dev->net->phydev;
1814 
1815 	phydev = phy_find_first(dev->mdiobus);
1816 	if (!phydev) {
1817 		netdev_err(dev->net, "no PHY found\n");
1818 		return -EIO;
1819 	}
1820 
1821 	/* Enable PHY interrupts.
1822 	 * We handle our own interrupt
1823 	 */
1824 	ret = phy_read(phydev, LAN88XX_INT_STS);
1825 	ret = phy_write(phydev, LAN88XX_INT_MASK,
1826 			LAN88XX_INT_MASK_MDINTPIN_EN_ |
1827 			LAN88XX_INT_MASK_LINK_CHANGE_);
1828 
1829 	phydev->irq = PHY_IGNORE_INTERRUPT;
1830 
1831 	ret = phy_connect_direct(dev->net, phydev,
1832 				 lan78xx_link_status_change,
1833 				 PHY_INTERFACE_MODE_GMII);
1834 	if (ret) {
1835 		netdev_err(dev->net, "can't attach PHY to %s\n",
1836 			   dev->mdiobus->id);
1837 		return -EIO;
1838 	}
1839 
1840 	/* set to AUTOMDIX */
1841 	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1842 
1843 	/* MAC doesn't support 1000T Half */
1844 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
1845 
1846 	/* support both flow controls */
1847 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1848 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1849 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1850 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1851 
1852 	genphy_config_aneg(phydev);
1853 
1854 	dev->fc_autoneg = phydev->autoneg;
1855 
1856 	phy_start(phydev);
1857 
1858 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1859 
1860 	return 0;
1861 }
1862 
/* Program the MAC's maximum RX frame length.
 *
 * @size: maximum frame size excluding FCS; 4 bytes are added for the FCS.
 *
 * The receiver is temporarily disabled around the change if it was
 * running, then restored. Always returns 0 (write errors are ignored,
 * driver-wide convention).
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		/* receiver must be idle while the max size is changed */
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
1891 
/* Unlink every URB queued on @q that is not already being unlinked.
 *
 * The queue lock must be dropped around usb_unlink_urb() (its completion
 * handler takes the same lock), so the walk is restarted from the head
 * after each unlink rather than iterating in place.
 * Returns the number of URBs successfully asked to unlink.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1936 
/* ndo_change_mtu: validate and apply a new MTU.
 *
 * Rejects MTUs above MAX_SINGLE_PACKET_SIZE, non-positive MTUs, and
 * link-layer sizes that are an exact multiple of the USB packet size
 * (which would require an extra zero-length packet per transfer).
 * Grows the RX URB size with the hard MTU when they were tracking each
 * other, recycling in-flight RX URBs so new ones pick up the new size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				/* drop in-flight RX URBs sized for the old MTU */
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1971 
1972 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1973 {
1974 	struct lan78xx_net *dev = netdev_priv(netdev);
1975 	struct sockaddr *addr = p;
1976 	u32 addr_lo, addr_hi;
1977 	int ret;
1978 
1979 	if (netif_running(netdev))
1980 		return -EBUSY;
1981 
1982 	if (!is_valid_ether_addr(addr->sa_data))
1983 		return -EADDRNOTAVAIL;
1984 
1985 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
1986 
1987 	addr_lo = netdev->dev_addr[0] |
1988 		  netdev->dev_addr[1] << 8 |
1989 		  netdev->dev_addr[2] << 16 |
1990 		  netdev->dev_addr[3] << 24;
1991 	addr_hi = netdev->dev_addr[4] |
1992 		  netdev->dev_addr[5] << 8;
1993 
1994 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1995 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1996 
1997 	return 0;
1998 }
1999 
2000 /* Enable or disable Rx checksum offload engine */
2001 static int lan78xx_set_features(struct net_device *netdev,
2002 				netdev_features_t features)
2003 {
2004 	struct lan78xx_net *dev = netdev_priv(netdev);
2005 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2006 	unsigned long flags;
2007 	int ret;
2008 
2009 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2010 
2011 	if (features & NETIF_F_RXCSUM) {
2012 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2013 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2014 	} else {
2015 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2016 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2017 	}
2018 
2019 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2020 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2021 	else
2022 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2023 
2024 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2025 
2026 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2027 
2028 	return 0;
2029 }
2030 
/* Work handler: flush the cached VLAN filter table to the dataport RAM.
 * Runs in process context because the dataport write can sleep.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2040 
2041 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2042 				   __be16 proto, u16 vid)
2043 {
2044 	struct lan78xx_net *dev = netdev_priv(netdev);
2045 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2046 	u16 vid_bit_index;
2047 	u16 vid_dword_index;
2048 
2049 	vid_dword_index = (vid >> 5) & 0x7F;
2050 	vid_bit_index = vid & 0x1F;
2051 
2052 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2053 
2054 	/* defer register writes to a sleepable context */
2055 	schedule_work(&pdata->set_vlan);
2056 
2057 	return 0;
2058 }
2059 
2060 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2061 				    __be16 proto, u16 vid)
2062 {
2063 	struct lan78xx_net *dev = netdev_priv(netdev);
2064 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2065 	u16 vid_bit_index;
2066 	u16 vid_dword_index;
2067 
2068 	vid_dword_index = (vid >> 5) & 0x7F;
2069 	vid_bit_index = vid & 0x1F;
2070 
2071 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2072 
2073 	/* defer register writes to a sleepable context */
2074 	schedule_work(&pdata->set_vlan);
2075 
2076 	return 0;
2077 }
2078 
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1 and the EEPROM (or OTP) carries a 24-byte
 * LTM configuration block (length marker at offset 0x3F, pointer in the
 * following byte), it is loaded into regs[]; otherwise all six registers
 * are programmed to zero. A failed raw read aborts without writing.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				/* temp[1] is a word offset into the EEPROM */
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2117 
/* Bring the chip to a known-good running state: lite reset, MAC
 * address setup, LTM programming, burst/bulk-in tuning per USB speed,
 * FIFO sizing, receive-filter defaults, offload configuration and a
 * PHY reset; finally enable the TX/RX MAC and FCT paths.
 *
 * Returns 0 on success, or -EIO when a self-clearing reset bit does
 * not clear within one second.
 *
 * NOTE(review): the intermediate register read/write return codes are
 * assigned to 'ret' but never checked — register access failures are
 * silently ignored; confirm this is intentional.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a "lite" reset and poll until the bit self-clears */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for the reset bit to clear AND the device to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC track the PHY's negotiated speed/duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the TX path: MAC first, then the FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC first, then the FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2250 
2251 static void lan78xx_init_stats(struct lan78xx_net *dev)
2252 {
2253 	u32 *p;
2254 	int i;
2255 
2256 	/* initialize for stats update
2257 	 * some counters are 20bits and some are 32bits
2258 	 */
2259 	p = (u32 *)&dev->stats.rollover_max;
2260 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2261 		p[i] = 0xFFFFF;
2262 
2263 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2264 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2265 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2266 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2267 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2268 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2269 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2270 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2271 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2272 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2273 
2274 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2275 }
2276 
/* ndo_open: reset the hardware, attach/start the PHY, arm the interrupt
 * URB used for link-change notification, seed stats collection and open
 * the TX queue.  Returns 0 on success or a negative errno.
 *
 * The autopm reference taken here is dropped again at 'done' on both
 * the success and the error paths; after open, runtime PM is governed
 * by per-transfer autopm references.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force a link-state evaluation in the deferred worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2319 
2320 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2321 {
2322 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2323 	DECLARE_WAITQUEUE(wait, current);
2324 	int temp;
2325 
2326 	/* ensure there are no more active urbs */
2327 	add_wait_queue(&unlink_wakeup, &wait);
2328 	set_current_state(TASK_UNINTERRUPTIBLE);
2329 	dev->wait = &unlink_wakeup;
2330 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2331 
2332 	/* maybe wait for deletions to finish. */
2333 	while (!skb_queue_empty(&dev->rxq) &&
2334 	       !skb_queue_empty(&dev->txq) &&
2335 	       !skb_queue_empty(&dev->done)) {
2336 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2337 		set_current_state(TASK_UNINTERRUPTIBLE);
2338 		netif_dbg(dev, ifdown, dev->net,
2339 			  "waited for %d urb completions\n", temp);
2340 	}
2341 	set_current_state(TASK_RUNNING);
2342 	dev->wait = NULL;
2343 	remove_wait_queue(&unlink_wakeup, &wait);
2344 }
2345 
/* ndo_stop: tear down in strict order — stop the stats timer, detach
 * the PHY, mark the device closed, drain all URBs, then neutralise the
 * deferred work/tasklet before dropping the autopm reference taken at
 * open.  Always returns 0.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop periodic stats collection before anything else */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balances the get in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2383 
/* Flatten any paged fragments into the linear skb buffer so the TX
 * command words can be prepended to contiguous data.  Returns 0 on
 * success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc;

	rc = skb_linearize(skb);

	return rc;
}
2388 
/* Prepend the two little-endian TX command words (TX_CMD_A then
 * TX_CMD_B) that the chip expects in front of every frame, encoding
 * length, FCS insertion, checksum offload, LSO/MSS and VLAN tag.
 *
 * May replace the skb (to gain headroom) — the original is consumed.
 * Returns the prepared skb, or NULL on allocation/linearize failure
 * (in which case the original skb has already been freed).
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* make room for the 8-byte command header if needed */
	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	if (lan78xx_linearize(skb) < 0)
		return NULL;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* let hardware fill in IP and TCP/UDP checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push B first, then A, so A ends up first on the wire;
	 * cpu_to_le32s() byte-swaps in place on big-endian hosts
	 */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2436 
/* Move skb from 'list' to dev->done and stamp its new state, deferring
 * the actual processing to the bottom-half tasklet.  Returns the state
 * the skb had before the move (callers use this to detect unlinking).
 *
 * Locking: interrupts are disabled across BOTH queue operations — the
 * plain spin_unlock()/spin_lock() pair in the middle hands off from
 * list->lock to dev->done.lock without re-enabling IRQs; 'flags' is
 * only restored at the final unlock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the bh only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2459 
/* URB completion handler for bulk-out transfers (interrupt context).
 * Updates TX statistics, reacts to the error class (halt recovery,
 * shutdown, or queue stop on link-level errors), releases the per-URB
 * autopm reference and hands the skb to the bottom half for cleanup.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* link-level problems: stop feeding the pipe */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* drop the reference taken when the urb was submitted */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2498 
2499 static void lan78xx_queue_skb(struct sk_buff_head *list,
2500 			      struct sk_buff *newsk, enum skb_state state)
2501 {
2502 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2503 
2504 	__skb_queue_tail(list, newsk);
2505 	entry->state = state;
2506 }
2507 
2508 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2509 {
2510 	struct lan78xx_net *dev = netdev_priv(net);
2511 	struct sk_buff *skb2 = NULL;
2512 
2513 	if (skb) {
2514 		skb_tx_timestamp(skb);
2515 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2516 	}
2517 
2518 	if (skb2) {
2519 		skb_queue_tail(&dev->txq_pend, skb2);
2520 
2521 		/* throttle TX patch at slower than SUPER SPEED USB */
2522 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2523 		    (skb_queue_len(&dev->txq_pend) > 10))
2524 			netif_stop_queue(net);
2525 	} else {
2526 		netif_dbg(dev, tx_err, dev->net,
2527 			  "lan78xx_tx_prep return NULL\n");
2528 		dev->net->stats.tx_errors++;
2529 		dev->net->stats.tx_dropped++;
2530 	}
2531 
2532 	tasklet_schedule(&dev->bh);
2533 
2534 	return NETDEV_TX_OK;
2535 }
2536 
/* Scan the interface's altsettings for the first one providing a bulk
 * IN, a bulk OUT and (optionally) an interrupt IN endpoint, then record
 * the resulting pipes.  Returns 0 on success, -EINVAL if no altsetting
 * has both bulk endpoints.
 *
 * NOTE(review): dev->ep_intr may legitimately be left NULL here —
 * verify that later users of ep_intr tolerate that.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints are usable */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* ignore iso/control endpoints */
				continue;
			}
			/* keep the first endpoint found of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2594 
2595 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2596 {
2597 	struct lan78xx_priv *pdata = NULL;
2598 	int ret;
2599 	int i;
2600 
2601 	ret = lan78xx_get_endpoints(dev, intf);
2602 
2603 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2604 
2605 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2606 	if (!pdata) {
2607 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2608 		return -ENOMEM;
2609 	}
2610 
2611 	pdata->dev = dev;
2612 
2613 	spin_lock_init(&pdata->rfe_ctl_lock);
2614 	mutex_init(&pdata->dataport_mutex);
2615 
2616 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2617 
2618 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2619 		pdata->vlan_table[i] = 0;
2620 
2621 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2622 
2623 	dev->net->features = 0;
2624 
2625 	if (DEFAULT_TX_CSUM_ENABLE)
2626 		dev->net->features |= NETIF_F_HW_CSUM;
2627 
2628 	if (DEFAULT_RX_CSUM_ENABLE)
2629 		dev->net->features |= NETIF_F_RXCSUM;
2630 
2631 	if (DEFAULT_TSO_CSUM_ENABLE)
2632 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2633 
2634 	dev->net->hw_features = dev->net->features;
2635 
2636 	/* Init all registers */
2637 	ret = lan78xx_reset(dev);
2638 
2639 	lan78xx_mdio_init(dev);
2640 
2641 	dev->net->flags |= IFF_MULTICAST;
2642 
2643 	pdata->wol = WAKE_MAGIC;
2644 
2645 	return 0;
2646 }
2647 
2648 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2649 {
2650 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2651 
2652 	lan78xx_remove_mdio(dev);
2653 
2654 	if (pdata) {
2655 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2656 		kfree(pdata);
2657 		pdata = NULL;
2658 		dev->data[0] = 0;
2659 	}
2660 }
2661 
2662 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2663 				    struct sk_buff *skb,
2664 				    u32 rx_cmd_a, u32 rx_cmd_b)
2665 {
2666 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2667 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2668 		skb->ip_summed = CHECKSUM_NONE;
2669 	} else {
2670 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2671 		skb->ip_summed = CHECKSUM_COMPLETE;
2672 	}
2673 }
2674 
/* Deliver a fully-parsed frame to the network stack, or park it on
 * rxq_pause while reception is paused.  Updates RX counters and clears
 * the skb control block before handing the skb off.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	/* RX paused: hold the frame until lan78xx resumes reception */
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb held skb_data; scrub it before the stack reuses the area */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* skb ownership passes to the timestamping core if it takes it */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2700 
/* Parse one bulk-in buffer, which may carry several ethernet frames,
 * each preceded by a 10-byte command header (RX_CMD_A/B/C) and padded
 * to 4-byte alignment.  Good frames are cloned out and delivered via
 * lan78xx_skb_return(); for the last frame the original skb itself is
 * trimmed and handed back to the caller for delivery.
 *
 * Returns 1 on success (caller delivers any remaining skb->len), 0 on
 * error (caller counts an rx_error and recycles the skb).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* too short to even hold one header: drop */
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three little-endian command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error: skip this frame, keep parsing */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* interior frame: clone shares data, no copy */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2772 
2773 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2774 {
2775 	if (!lan78xx_rx(dev, skb)) {
2776 		dev->net->stats.rx_errors++;
2777 		goto done;
2778 	}
2779 
2780 	if (skb->len) {
2781 		lan78xx_skb_return(dev, skb);
2782 		return;
2783 	}
2784 
2785 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2786 	dev->net->stats.rx_errors++;
2787 done:
2788 	skb_queue_tail(&dev->done, skb);
2789 }
2790 
2791 static void rx_complete(struct urb *urb);
2792 
/* Allocate an rx skb for 'urb' and submit it on the bulk-in pipe.
 * On success the skb is queued on dev->rxq in state rx_start.  On any
 * failure both the skb and the urb are freed here (the caller must not
 * reuse the urb).  Returns 0, -ENOMEM, -ENOLINK (device unusable right
 * now) or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serialises the device-state checks below */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			/* transient failure: let the bh retry later */
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submit failed: release both resources here */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2852 
/* URB completion handler for bulk-in transfers (interrupt context).
 * Classifies the completion status, defers the skb to the bottom half
 * with the appropriate state, and — when the urb is still usable —
 * resubmits it immediately with a fresh skb.
 *
 * Ownership: setting 'entry->urb = urb; urb = NULL;' passes the urb to
 * the bottom half for freeing; otherwise this function resubmits or
 * frees it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count and recycle instead of parsing */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* returns the state the skb had BEFORE this completion ran */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* rx_submit takes over (and frees on failure) */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2922 
/* Bottom-half TX path: coalesce pending frames from txq_pend into one
 * bulk-out URB (each frame already carries its command header and is
 * padded to 4-byte alignment), or send a GSO frame on its own, then
 * submit.  When the device is autosuspended the URB is parked on
 * dev->deferred and sent at resume time.
 *
 * Note the control flow around 'drop:' — the label sits inside the
 * final 'if (ret)' block and earlier failure paths goto into it.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* first pass: decide how many frames fit in one aggregate */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame goes out alone, unbatched */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: copy each frame at its 4-byte-aligned offset */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	/* txq.lock guards submission against suspend/halt transitions */
	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3046 
3047 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3048 {
3049 	struct urb *urb;
3050 	int i;
3051 
3052 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3053 		for (i = 0; i < 10; i++) {
3054 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3055 				break;
3056 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3057 			if (urb)
3058 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3059 					return;
3060 		}
3061 
3062 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3063 			tasklet_schedule(&dev->bh);
3064 	}
3065 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3066 		netif_wake_queue(dev->net);
3067 }
3068 
/* Tasklet body: drain dev->done, dispatching each skb by the state the
 * completion handlers stamped on it, then refill RX and flush pending
 * TX while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* parse/deliver; may re-queue skb as rx_cleanup */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop draining this round */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* skip RX refill while throttled or halted */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3112 
/* Deferred worker: handles events flagged by interrupt-context code —
 * TX/RX endpoint halt recovery, link reset and periodic statistics
 * collection.  Runs in process context so it may sleep (autopm,
 * usb_clear_halt).
 *
 * NOTE(review): the 'fail_pipe'/'fail_halt' labels sit INSIDE the
 * netif_msg_* if-bodies, so the autopm-failure gotos jump into those
 * statements — legal C, but easy to misread when editing.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		/* NOTE(review): 'ret' is never assigned from
		 * lan78xx_link_reset(), so the failure message below
		 * always logs 0 — confirm and fix separately.
		 */
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		/* back off the polling interval exponentially, capped */
		dev->delta = min((dev->delta * 2), 50);
	}
}
3190 
/* Completion handler for the interrupt (status) endpoint URB: process
 * the status payload and resubmit, except when the URB was killed or
 * the hardware is gone.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* clear stale status before the next poll */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
3226 
/* USB disconnect callback: unregister the netdev, quiesce deferred
 * work and anchored URBs, unbind the private data, kill/free the
 * interrupt URB and release the references taken at probe time.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	/* unregister first: triggers ndo_stop while dev is still valid */
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX urbs parked while the device was suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* balances usb_get_dev() in probe */
}
3255 
/* ndo_tx_timeout handler: unlink every URB queued on the TX ring and
 * kick the driver tasklet so transmission is retried from scratch.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3263 
/* Network stack entry points for the lan78xx netdev */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3278 
/* Statistics timer callback (timer/softirq context): defer the actual
 * stats collection to the kevent work handler (EVENT_STAT_UPDATE is
 * consumed in lan78xx_delayedwork), since USB register access cannot
 * be performed from here.
 */
static void lan78xx_stat_monitor(unsigned long param)
{
	struct lan78xx_net *dev;

	dev = (struct lan78xx_net *)param;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3287 
3288 static int lan78xx_probe(struct usb_interface *intf,
3289 			 const struct usb_device_id *id)
3290 {
3291 	struct lan78xx_net *dev;
3292 	struct net_device *netdev;
3293 	struct usb_device *udev;
3294 	int ret;
3295 	unsigned maxp;
3296 	unsigned period;
3297 	u8 *buf = NULL;
3298 
3299 	udev = interface_to_usbdev(intf);
3300 	udev = usb_get_dev(udev);
3301 
3302 	ret = -ENOMEM;
3303 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3304 	if (!netdev) {
3305 			dev_err(&intf->dev, "Error: OOM\n");
3306 			goto out1;
3307 	}
3308 
3309 	/* netdev_printk() needs this */
3310 	SET_NETDEV_DEV(netdev, &intf->dev);
3311 
3312 	dev = netdev_priv(netdev);
3313 	dev->udev = udev;
3314 	dev->intf = intf;
3315 	dev->net = netdev;
3316 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3317 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3318 
3319 	skb_queue_head_init(&dev->rxq);
3320 	skb_queue_head_init(&dev->txq);
3321 	skb_queue_head_init(&dev->done);
3322 	skb_queue_head_init(&dev->rxq_pause);
3323 	skb_queue_head_init(&dev->txq_pend);
3324 	mutex_init(&dev->phy_mutex);
3325 
3326 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3327 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3328 	init_usb_anchor(&dev->deferred);
3329 
3330 	netdev->netdev_ops = &lan78xx_netdev_ops;
3331 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3332 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3333 
3334 	dev->stat_monitor.function = lan78xx_stat_monitor;
3335 	dev->stat_monitor.data = (unsigned long)dev;
3336 	dev->delta = 1;
3337 	init_timer(&dev->stat_monitor);
3338 
3339 	mutex_init(&dev->stats.access_lock);
3340 
3341 	ret = lan78xx_bind(dev, intf);
3342 	if (ret < 0)
3343 		goto out2;
3344 	strcpy(netdev->name, "eth%d");
3345 
3346 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3347 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3348 
3349 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3350 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3351 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3352 
3353 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3354 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3355 
3356 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3357 					dev->ep_intr->desc.bEndpointAddress &
3358 					USB_ENDPOINT_NUMBER_MASK);
3359 	period = dev->ep_intr->desc.bInterval;
3360 
3361 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3362 	buf = kmalloc(maxp, GFP_KERNEL);
3363 	if (buf) {
3364 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3365 		if (!dev->urb_intr) {
3366 			kfree(buf);
3367 			goto out3;
3368 		} else {
3369 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3370 					 dev->pipe_intr, buf, maxp,
3371 					 intr_complete, dev, period);
3372 		}
3373 	}
3374 
3375 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3376 
3377 	/* driver requires remote-wakeup capability during autosuspend. */
3378 	intf->needs_remote_wakeup = 1;
3379 
3380 	ret = register_netdev(netdev);
3381 	if (ret != 0) {
3382 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3383 		goto out2;
3384 	}
3385 
3386 	usb_set_intfdata(intf, dev);
3387 
3388 	ret = device_set_wakeup_enable(&udev->dev, true);
3389 
3390 	 /* Default delay of 2sec has more overhead than advantage.
3391 	  * Set to 10sec as default.
3392 	  */
3393 	pm_runtime_set_autosuspend_delay(&udev->dev,
3394 					 DEFAULT_AUTOSUSPEND_DELAY);
3395 
3396 	return 0;
3397 
3398 out3:
3399 	lan78xx_unbind(dev, intf);
3400 out2:
3401 	free_netdev(netdev);
3402 out1:
3403 	usb_put_dev(udev);
3404 
3405 	return ret;
3406 }
3407 
3408 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3409 {
3410 	const u16 crc16poly = 0x8005;
3411 	int i;
3412 	u16 bit, crc, msb;
3413 	u8 data;
3414 
3415 	crc = 0xFFFF;
3416 	for (i = 0; i < len; i++) {
3417 		data = *buf++;
3418 		for (bit = 0; bit < 8; bit++) {
3419 			msb = crc >> 15;
3420 			crc <<= 1;
3421 
3422 			if (msb ^ (u16)(data & 1)) {
3423 				crc ^= crc16poly;
3424 				crc |= (u16)0x0001U;
3425 			}
3426 			data >>= 1;
3427 		}
3428 	}
3429 
3430 	return crc;
3431 }
3432 
/* Program wake-on-LAN filters and power-management state for system
 * suspend, according to the WAKE_* bits in @wol.
 *
 * Sequence: disable MAC TX/RX, clear the wake status/filter registers,
 * accumulate WUCSR and PMT_CTL settings plus per-pattern wakeup-frame
 * filters (CRC16 over the matched header bytes), then re-enable RX so
 * the device can observe wake packets while suspended.
 *
 * NOTE(review): each register access assigns @ret but none of the
 * values are checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* leading bytes of the multicast MAC prefixes and the ARP
	 * EtherType, used as wakeup-frame match patterns below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake state */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with all wakeup-frame filters disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first three bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first two bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12 and 13 (EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake packets can be seen while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3575 
/* PM suspend callback, shared by runtime (auto) suspend and system
 * sleep.  On the first suspend (suspend_count 0 -> 1): refuse
 * autosuspend while TX work is pending, otherwise mark the device
 * asleep, stop MAC TX/RX, tear down the data and interrupt URBs and
 * detach/reattach the netif.  Then arm wakeup: good-frame wakeup for
 * autosuspend, or the user-selected WoL set via lan78xx_set_suspend()
 * for system sleep.
 *
 * Returns 0 on success or -EBUSY when autosuspend is refused.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats collection while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear any latched wakeup status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake frames can be received */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3671 
/* PM resume callback: restart the statistics timer, resubmit the
 * interrupt URB and any TX URBs that were deferred while the device
 * slept, clear all wakeup status/enable registers, and re-enable the
 * transmitter.
 *
 * NOTE(review): individual register writes and the interrupt-URB
 * resubmission are not error-checked; the function always returns 0.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart stats collection at the fastest interval */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* flush TX URBs deferred while asleep; drop any that
		 * fail to submit and release their autopm reference
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake settings and any latched wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3738 
/* Resume after the device may have been reset (e.g. power lost during
 * sleep): reinitialize the chip and PHY before running the normal
 * resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3749 
/* USB vendor/product IDs of the devices this driver supports */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3762 
/* USB driver glue: autosuspend is supported and hub-initiated LPM is
 * disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
3780