xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision 8b235f2f)
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/mii.h>
23 #include <linux/usb.h>
24 #include <linux/crc32.h>
25 #include <linux/signal.h>
26 #include <linux/slab.h>
27 #include <linux/if_vlan.h>
28 #include <linux/uaccess.h>
29 #include <linux/list.h>
30 #include <linux/ip.h>
31 #include <linux/ipv6.h>
32 #include <linux/mdio.h>
33 #include <net/ip6_checksum.h>
34 #include "lan78xx.h"
35 
36 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME	"lan78xx"
39 #define DRIVER_VERSION	"1.0.0"
40 
41 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
42 #define THROTTLE_JIFFIES		(HZ / 8)
43 #define UNLINK_TIMEOUT_MS		3
44 
45 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
46 
47 #define SS_USB_PKT_SIZE			(1024)
48 #define HS_USB_PKT_SIZE			(512)
49 #define FS_USB_PKT_SIZE			(64)
50 
51 #define MAX_RX_FIFO_SIZE		(12 * 1024)
52 #define MAX_TX_FIFO_SIZE		(12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY		(0x0800)
55 #define MAX_SINGLE_PACKET_SIZE		(9000)
56 #define DEFAULT_TX_CSUM_ENABLE		(true)
57 #define DEFAULT_RX_CSUM_ENABLE		(true)
58 #define DEFAULT_TSO_CSUM_ENABLE		(true)
59 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
60 #define INTERNAL_PHY_ID			(2)	/* 2: GMII */
61 #define TX_OVERHEAD			(8)
62 #define RXW_PADDING			2
63 
64 #define LAN78XX_USB_VENDOR_ID		(0x0424)
65 #define LAN7800_USB_PRODUCT_ID		(0x7800)
66 #define LAN7850_USB_PRODUCT_ID		(0x7850)
67 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
68 #define LAN78XX_OTP_MAGIC		(0x78F3)
69 
70 #define	MII_READ			1
71 #define	MII_WRITE			0
72 
73 #define EEPROM_INDICATOR		(0xA5)
74 #define EEPROM_MAC_OFFSET		(0x01)
75 #define MAX_EEPROM_SIZE			512
76 #define OTP_INDICATOR_1			(0xF3)
77 #define OTP_INDICATOR_2			(0xF7)
78 
79 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
80 					 WAKE_MCAST | WAKE_BCAST | \
81 					 WAKE_ARP | WAKE_MAGIC)
82 
83 /* USB related defines */
84 #define BULK_IN_PIPE			1
85 #define BULK_OUT_PIPE			2
86 
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
89 
/* ethtool statistics names; entry order must match the field order of
 * struct lan78xx_statstage below (both are copied out index-for-index).
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
139 
/* Hardware statistics block, fetched in one USB_VENDOR_REQUEST_GET_STATS
 * control transfer by lan78xx_read_stats() and byte-swapped as an array
 * of little-endian u32 counters.  Field order must match
 * lan78xx_gstrings[] above.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
189 
190 struct lan78xx_net;
191 
/* Driver-private state stored behind dev->data[0]; holds the software
 * copies of the receive-filter tables that the deferred work items
 * flush to hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast reg writes */
	struct work_struct set_vlan;
	u32 wol;
};
204 
/* Lifecycle state of an skb/urb pair, kept in struct skb_data. */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
214 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* urb carrying this skb, if any */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where this skb is in its lifecycle */
	size_t length;
};
221 
/* context handed to asynchronous control-transfer completions */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
226 
/* bit numbers in dev->flags; set via lan78xx_defer_kevent() and
 * serviced from the delayed work item dev->wq
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
236 
/* per-device state for one LAN78xx USB network adapter */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;	/* rx urb queue depth */
	int			tx_qlen;	/* tx urb queue depth */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;		/* completed urbs for bh */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;		/* rx/tx bottom half */
	struct delayed_work	wq;		/* kevent work (EVENT_* bits) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bit numbers */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;

	/* data[0] holds a struct lan78xx_priv * (see lan78xx_priv) */
	unsigned long		data[5];
	struct mii_if_info	mii;

	int			link_on;	/* last observed link state */
	u8			mdix_ctrl;
};
283 
/* use ethtool to change the level for any given device;
 * -1 leaves the driver's default level in place
 */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
288 
289 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
290 {
291 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
292 	int ret;
293 
294 	if (!buf)
295 		return -ENOMEM;
296 
297 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
298 			      USB_VENDOR_REQUEST_READ_REGISTER,
299 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
301 	if (likely(ret >= 0)) {
302 		le32_to_cpus(buf);
303 		*data = *buf;
304 	} else {
305 		netdev_warn(dev->net,
306 			    "Failed to read register index 0x%08x. ret = %d",
307 			    index, ret);
308 	}
309 
310 	kfree(buf);
311 
312 	return ret;
313 }
314 
315 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
316 {
317 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
318 	int ret;
319 
320 	if (!buf)
321 		return -ENOMEM;
322 
323 	*buf = data;
324 	cpu_to_le32s(buf);
325 
326 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
327 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
328 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
329 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
330 	if (unlikely(ret < 0)) {
331 		netdev_warn(dev->net,
332 			    "Failed to write register index 0x%08x. ret = %d",
333 			    index, ret);
334 	}
335 
336 	kfree(buf);
337 
338 	return ret;
339 }
340 
341 static int lan78xx_read_stats(struct lan78xx_net *dev,
342 			      struct lan78xx_statstage *data)
343 {
344 	int ret = 0;
345 	int i;
346 	struct lan78xx_statstage *stats;
347 	u32 *src;
348 	u32 *dst;
349 
350 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
351 	if (!stats)
352 		return -ENOMEM;
353 
354 	ret = usb_control_msg(dev->udev,
355 			      usb_rcvctrlpipe(dev->udev, 0),
356 			      USB_VENDOR_REQUEST_GET_STATS,
357 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
358 			      0,
359 			      0,
360 			      (void *)stats,
361 			      sizeof(*stats),
362 			      USB_CTRL_SET_TIMEOUT);
363 	if (likely(ret >= 0)) {
364 		src = (u32 *)stats;
365 		dst = (u32 *)data;
366 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
367 			le32_to_cpus(&src[i]);
368 			dst[i] = src[i];
369 		}
370 	} else {
371 		netdev_warn(dev->net,
372 			    "Failed to read stat ret = 0x%x", ret);
373 	}
374 
375 	kfree(stats);
376 
377 	return ret;
378 }
379 
380 /* Loop until the read is completed with timeout called with phy_mutex held */
381 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
382 {
383 	unsigned long start_time = jiffies;
384 	u32 val;
385 	int ret;
386 
387 	do {
388 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
389 		if (unlikely(ret < 0))
390 			return -EIO;
391 
392 		if (!(val & MII_ACC_MII_BUSY_))
393 			return 0;
394 	} while (!time_after(jiffies, start_time + HZ));
395 
396 	return -EIO;
397 }
398 
399 static inline u32 mii_access(int id, int index, int read)
400 {
401 	u32 ret;
402 
403 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
404 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
405 	if (read)
406 		ret |= MII_ACC_MII_READ_;
407 	else
408 		ret |= MII_ACC_MII_WRITE_;
409 	ret |= MII_ACC_MII_BUSY_;
410 
411 	return ret;
412 }
413 
414 static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
415 {
416 	struct lan78xx_net *dev = netdev_priv(netdev);
417 	u32 val, addr;
418 	int ret;
419 
420 	ret = usb_autopm_get_interface(dev->intf);
421 	if (ret < 0)
422 		return ret;
423 
424 	mutex_lock(&dev->phy_mutex);
425 
426 	/* confirm MII not busy */
427 	ret = lan78xx_phy_wait_not_busy(dev);
428 	if (ret < 0)
429 		goto done;
430 
431 	/* set the address, index & direction (read from PHY) */
432 	phy_id &= dev->mii.phy_id_mask;
433 	idx &= dev->mii.reg_num_mask;
434 	addr = mii_access(phy_id, idx, MII_READ);
435 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
436 
437 	ret = lan78xx_phy_wait_not_busy(dev);
438 	if (ret < 0)
439 		goto done;
440 
441 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
442 
443 	ret = (int)(val & 0xFFFF);
444 
445 done:
446 	mutex_unlock(&dev->phy_mutex);
447 	usb_autopm_put_interface(dev->intf);
448 	return ret;
449 }
450 
451 static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
452 			       int idx, int regval)
453 {
454 	struct lan78xx_net *dev = netdev_priv(netdev);
455 	u32 val, addr;
456 	int ret;
457 
458 	if (usb_autopm_get_interface(dev->intf) < 0)
459 		return;
460 
461 	mutex_lock(&dev->phy_mutex);
462 
463 	/* confirm MII not busy */
464 	ret = lan78xx_phy_wait_not_busy(dev);
465 	if (ret < 0)
466 		goto done;
467 
468 	val = regval;
469 	ret = lan78xx_write_reg(dev, MII_DATA, val);
470 
471 	/* set the address, index & direction (write to PHY) */
472 	phy_id &= dev->mii.phy_id_mask;
473 	idx &= dev->mii.reg_num_mask;
474 	addr = mii_access(phy_id, idx, MII_WRITE);
475 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
476 
477 	ret = lan78xx_phy_wait_not_busy(dev);
478 	if (ret < 0)
479 		goto done;
480 
481 done:
482 	mutex_unlock(&dev->phy_mutex);
483 	usb_autopm_put_interface(dev->intf);
484 }
485 
/* Write @regval to MMD register @mmdidx of MMD device @mmddev behind
 * PHY @phy_id.  The MMD is reached indirectly through four ordered MII
 * cycles (device address -> register index -> data-no-increment mode ->
 * data), each staged in MII_DATA and started via MII_ACC, with a
 * busy-wait between steps.  Individual lan78xx_write_reg() results are
 * discarded; only a busy-wait failure aborts the sequence.  Returns
 * silently if the device cannot be resumed.
 */
static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
			      int mmddev, int mmdidx, int regval)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	u32 val, addr;
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	mmddev &= 0x1F;	/* MMD device addresses are 5 bits */

	/* set up device address for MMD */
	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register of MMD */
	val = mmdidx;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register data for MMD */
	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* write to MMD */
	val = regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
}
556 
/* Read MMD register @mmdidx of MMD device @mmddev behind PHY @phy_id.
 * Mirrors lan78xx_mmd_write(): the MMD is reached indirectly through
 * ordered MII cycles (device address -> register index ->
 * data-no-increment mode -> data read), with a busy-wait between
 * steps.  Returns the 16-bit value read, or a negative errno from
 * autopm/busy-wait failure.  Note: @mmddev is not masked to 5 bits
 * here, unlike in lan78xx_mmd_write().
 */
static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
			    int mmddev, int mmdidx)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set up device address for MMD */
	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register of MMD */
	val = mmdidx;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register data for MMD */
	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* read from MMD */
	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
629 
630 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
631 {
632 	unsigned long start_time = jiffies;
633 	u32 val;
634 	int ret;
635 
636 	do {
637 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
638 		if (unlikely(ret < 0))
639 			return -EIO;
640 
641 		if (!(val & E2P_CMD_EPC_BUSY_) ||
642 		    (val & E2P_CMD_EPC_TIMEOUT_))
643 			break;
644 		usleep_range(40, 100);
645 	} while (!time_after(jiffies, start_time + HZ));
646 
647 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
648 		netdev_warn(dev->net, "EEPROM read operation timeout");
649 		return -EIO;
650 	}
651 
652 	return 0;
653 }
654 
655 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
656 {
657 	unsigned long start_time = jiffies;
658 	u32 val;
659 	int ret;
660 
661 	do {
662 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
663 		if (unlikely(ret < 0))
664 			return -EIO;
665 
666 		if (!(val & E2P_CMD_EPC_BUSY_))
667 			return 0;
668 
669 		usleep_range(40, 100);
670 	} while (!time_after(jiffies, start_time + HZ));
671 
672 	netdev_warn(dev->net, "EEPROM is busy");
673 	return -EIO;
674 }
675 
676 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
677 				   u32 length, u8 *data)
678 {
679 	u32 val;
680 	int i, ret;
681 
682 	ret = lan78xx_eeprom_confirm_not_busy(dev);
683 	if (ret)
684 		return ret;
685 
686 	for (i = 0; i < length; i++) {
687 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
688 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
689 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
690 		if (unlikely(ret < 0))
691 			return -EIO;
692 
693 		ret = lan78xx_wait_eeprom(dev);
694 		if (ret < 0)
695 			return ret;
696 
697 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
698 		if (unlikely(ret < 0))
699 			return -EIO;
700 
701 		data[i] = val & 0xFF;
702 		offset++;
703 	}
704 
705 	return 0;
706 }
707 
708 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
709 			       u32 length, u8 *data)
710 {
711 	u8 sig;
712 	int ret;
713 
714 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
715 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
716 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
717 	else
718 		ret = -EINVAL;
719 
720 	return ret;
721 }
722 
723 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
724 				    u32 length, u8 *data)
725 {
726 	u32 val;
727 	int i, ret;
728 
729 	ret = lan78xx_eeprom_confirm_not_busy(dev);
730 	if (ret)
731 		return ret;
732 
733 	/* Issue write/erase enable command */
734 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
735 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
736 	if (unlikely(ret < 0))
737 		return -EIO;
738 
739 	ret = lan78xx_wait_eeprom(dev);
740 	if (ret < 0)
741 		return ret;
742 
743 	for (i = 0; i < length; i++) {
744 		/* Fill data register */
745 		val = data[i];
746 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
747 		if (ret < 0)
748 			return ret;
749 
750 		/* Send "write" command */
751 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
752 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
753 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
754 		if (ret < 0)
755 			return ret;
756 
757 		ret = lan78xx_wait_eeprom(dev);
758 		if (ret < 0)
759 			return ret;
760 
761 		offset++;
762 	}
763 
764 	return 0;
765 }
766 
/* Read @length bytes from OTP starting at @offset into @data.
 * If the OTP block is powered down, power it up first and wait (up to
 * ~1s) for OTP_PWR_DN to clear.  Each byte read programs the split
 * address registers, issues a read command plus GO, and polls
 * OTP_STATUS busy (up to ~1s per byte).  Intermediate register-access
 * return codes are discarded.  Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: bits 15..11 and 10..3 live in
		 * separate registers
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
820 
821 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
822 			    u32 length, u8 *data)
823 {
824 	u8 sig;
825 	int ret;
826 
827 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
828 
829 	if (ret == 0) {
830 		if (sig == OTP_INDICATOR_1)
831 			offset = offset;
832 		else if (sig == OTP_INDICATOR_2)
833 			offset += 0x100;
834 		else
835 			ret = -EINVAL;
836 		ret = lan78xx_read_raw_otp(dev, offset, length, data);
837 	}
838 
839 	return ret;
840 }
841 
842 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
843 {
844 	int i, ret;
845 
846 	for (i = 0; i < 100; i++) {
847 		u32 dp_sel;
848 
849 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
850 		if (unlikely(ret < 0))
851 			return -EIO;
852 
853 		if (dp_sel & DP_SEL_DPRDY_)
854 			return 0;
855 
856 		usleep_range(40, 100);
857 	}
858 
859 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
860 
861 	return -EIO;
862 }
863 
864 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
865 				  u32 addr, u32 length, u32 *buf)
866 {
867 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
868 	u32 dp_sel;
869 	int i, ret;
870 
871 	if (usb_autopm_get_interface(dev->intf) < 0)
872 			return 0;
873 
874 	mutex_lock(&pdata->dataport_mutex);
875 
876 	ret = lan78xx_dataport_wait_not_busy(dev);
877 	if (ret < 0)
878 		goto done;
879 
880 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
881 
882 	dp_sel &= ~DP_SEL_RSEL_MASK_;
883 	dp_sel |= ram_select;
884 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
885 
886 	for (i = 0; i < length; i++) {
887 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
888 
889 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
890 
891 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
892 
893 		ret = lan78xx_dataport_wait_not_busy(dev);
894 		if (ret < 0)
895 			goto done;
896 	}
897 
898 done:
899 	mutex_unlock(&pdata->dataport_mutex);
900 	usb_autopm_put_interface(dev->intf);
901 
902 	return ret;
903 }
904 
905 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
906 				    int index, u8 addr[ETH_ALEN])
907 {
908 	u32	temp;
909 
910 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
911 		temp = addr[3];
912 		temp = addr[2] | (temp << 8);
913 		temp = addr[1] | (temp << 8);
914 		temp = addr[0] | (temp << 8);
915 		pdata->pfilter_table[index][1] = temp;
916 		temp = addr[5];
917 		temp = addr[4] | (temp << 8);
918 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
919 		pdata->pfilter_table[index][0] = temp;
920 	}
921 }
922 
923 /* returns hash bit number for given MAC address */
924 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
925 {
926 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
927 }
928 
929 static void lan78xx_deferred_multicast_write(struct work_struct *param)
930 {
931 	struct lan78xx_priv *pdata =
932 			container_of(param, struct lan78xx_priv, set_multicast);
933 	struct lan78xx_net *dev = pdata->dev;
934 	int i;
935 	int ret;
936 
937 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
938 		  pdata->rfe_ctl);
939 
940 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
941 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
942 
943 	for (i = 1; i < NUM_OF_MAF; i++) {
944 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
945 		ret = lan78xx_write_reg(dev, MAF_LO(i),
946 					pdata->pfilter_table[i][1]);
947 		ret = lan78xx_write_reg(dev, MAF_HI(i),
948 					pdata->pfilter_table[i][0]);
949 	}
950 
951 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
952 }
953 
954 static void lan78xx_set_multicast(struct net_device *netdev)
955 {
956 	struct lan78xx_net *dev = netdev_priv(netdev);
957 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
958 	unsigned long flags;
959 	int i;
960 
961 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
962 
963 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
964 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
965 
966 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
967 			pdata->mchash_table[i] = 0;
968 	/* pfilter_table[0] has own HW address */
969 	for (i = 1; i < NUM_OF_MAF; i++) {
970 			pdata->pfilter_table[i][0] =
971 			pdata->pfilter_table[i][1] = 0;
972 	}
973 
974 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
975 
976 	if (dev->net->flags & IFF_PROMISC) {
977 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
978 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
979 	} else {
980 		if (dev->net->flags & IFF_ALLMULTI) {
981 			netif_dbg(dev, drv, dev->net,
982 				  "receive all multicast enabled");
983 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
984 		}
985 	}
986 
987 	if (netdev_mc_count(dev->net)) {
988 		struct netdev_hw_addr *ha;
989 		int i;
990 
991 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
992 
993 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
994 
995 		i = 1;
996 		netdev_for_each_mc_addr(ha, netdev) {
997 			/* set first 32 into Perfect Filter */
998 			if (i < 33) {
999 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1000 			} else {
1001 				u32 bitnum = lan78xx_hash(ha->addr);
1002 
1003 				pdata->mchash_table[bitnum / 32] |=
1004 							(1 << (bitnum % 32));
1005 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1006 			}
1007 			i++;
1008 		}
1009 	}
1010 
1011 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1012 
1013 	/* defer register writes to a sleepable context */
1014 	schedule_work(&pdata->set_multicast);
1015 }
1016 
1017 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1018 				      u16 lcladv, u16 rmtadv)
1019 {
1020 	u32 flow = 0, fct_flow = 0;
1021 	int ret;
1022 
1023 	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1024 
1025 	if (cap & FLOW_CTRL_TX)
1026 		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
1027 
1028 	if (cap & FLOW_CTRL_RX)
1029 		flow |= FLOW_CR_RX_FCEN_;
1030 
1031 	if (dev->udev->speed == USB_SPEED_SUPER)
1032 		fct_flow = 0x817;
1033 	else if (dev->udev->speed == USB_SPEED_HIGH)
1034 		fct_flow = 0x211;
1035 
1036 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1037 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1038 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1039 
1040 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1041 
1042 	/* threshold value should be set before enabling flow */
1043 	ret = lan78xx_write_reg(dev, FLOW, flow);
1044 
1045 	return 0;
1046 }
1047 
1048 static int lan78xx_link_reset(struct lan78xx_net *dev)
1049 {
1050 	struct mii_if_info *mii = &dev->mii;
1051 	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1052 	u16 ladv, radv;
1053 	int ret;
1054 	u32 buf;
1055 
1056 	/* clear PHY interrupt status */
1057 	/* VTSE PHY */
1058 	ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
1059 	if (unlikely(ret < 0))
1060 		return -EIO;
1061 
1062 	/* clear LAN78xx interrupt status */
1063 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1064 	if (unlikely(ret < 0))
1065 		return -EIO;
1066 
1067 	if (!mii_link_ok(mii) && dev->link_on) {
1068 		dev->link_on = false;
1069 		netif_carrier_off(dev->net);
1070 
1071 		/* reset MAC */
1072 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1073 		if (unlikely(ret < 0))
1074 			return -EIO;
1075 		buf |= MAC_CR_RST_;
1076 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1077 		if (unlikely(ret < 0))
1078 			return -EIO;
1079 	} else if (mii_link_ok(mii) && !dev->link_on) {
1080 		dev->link_on = true;
1081 
1082 		mii_check_media(mii, 1, 1);
1083 		mii_ethtool_gset(&dev->mii, &ecmd);
1084 
1085 		mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
1086 
1087 		if (dev->udev->speed == USB_SPEED_SUPER) {
1088 			if (ethtool_cmd_speed(&ecmd) == 1000) {
1089 				/* disable U2 */
1090 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1091 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1092 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1093 				/* enable U1 */
1094 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1095 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1096 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1097 			} else {
1098 				/* enable U1 & U2 */
1099 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1100 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1101 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1102 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1103 			}
1104 		}
1105 
1106 		ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
1107 		if (unlikely(ladv < 0))
1108 			return -EIO;
1109 
1110 		radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
1111 		if (unlikely(radv < 0))
1112 			return -EIO;
1113 
1114 		netif_dbg(dev, link, dev->net,
1115 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1116 			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1117 
1118 		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1119 		netif_carrier_on(dev->net);
1120 	}
1121 
1122 	return ret;
1123 }
1124 
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.	hope the failure is rare.
 */
/* Mark EVENT_* bit @work in dev->flags and kick the delayed work item
 * so it gets serviced in process context.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1136 
1137 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1138 {
1139 	u32 intdata;
1140 
1141 	if (urb->actual_length != 4) {
1142 		netdev_warn(dev->net,
1143 			    "unexpected urb length %d", urb->actual_length);
1144 		return;
1145 	}
1146 
1147 	memcpy(&intdata, urb->transfer_buffer, 4);
1148 	le32_to_cpus(&intdata);
1149 
1150 	if (intdata & INT_ENP_PHY_INT) {
1151 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1152 			  lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1153 	} else
1154 		netdev_warn(dev->net,
1155 			    "unexpected interrupt: 0x%08x\n", intdata);
1156 }
1157 
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	/* fixed EEPROM size exposed to ethtool */
	return MAX_EEPROM_SIZE;
}
1162 
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* tag the dump so userspace can identify this device's EEPROM */
	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1172 
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		/* NOTE(review): this OTP-magic branch also calls the EEPROM
		 * writer; it almost certainly should program the OTP instead.
		 * Confirm whether an OTP write helper exists in this revision
		 * before changing it.
		 */
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);

	return -EINVAL;
}
1192 
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	/* only the statistics string table is exported */
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1199 
1200 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1201 {
1202 	if (sset == ETH_SS_STATS)
1203 		return ARRAY_SIZE(lan78xx_gstrings);
1204 	else
1205 		return -EOPNOTSUPP;
1206 }
1207 
1208 static void lan78xx_get_stats(struct net_device *netdev,
1209 			      struct ethtool_stats *stats, u64 *data)
1210 {
1211 	struct lan78xx_net *dev = netdev_priv(netdev);
1212 	struct lan78xx_statstage lan78xx_stat;
1213 	u32 *p;
1214 	int i;
1215 
1216 	if (usb_autopm_get_interface(dev->intf) < 0)
1217 		return;
1218 
1219 	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1220 		p = (u32 *)&lan78xx_stat;
1221 		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1222 			data[i] = p[i];
1223 	}
1224 
1225 	usb_autopm_put_interface(dev->intf);
1226 }
1227 
1228 static void lan78xx_get_wol(struct net_device *netdev,
1229 			    struct ethtool_wolinfo *wol)
1230 {
1231 	struct lan78xx_net *dev = netdev_priv(netdev);
1232 	int ret;
1233 	u32 buf;
1234 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1235 
1236 	if (usb_autopm_get_interface(dev->intf) < 0)
1237 			return;
1238 
1239 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1240 	if (unlikely(ret < 0)) {
1241 		wol->supported = 0;
1242 		wol->wolopts = 0;
1243 	} else {
1244 		if (buf & USB_CFG_RMT_WKP_) {
1245 			wol->supported = WAKE_ALL;
1246 			wol->wolopts = pdata->wol;
1247 		} else {
1248 			wol->supported = 0;
1249 			wol->wolopts = 0;
1250 		}
1251 	}
1252 
1253 	usb_autopm_put_interface(dev->intf);
1254 }
1255 
1256 static int lan78xx_set_wol(struct net_device *netdev,
1257 			   struct ethtool_wolinfo *wol)
1258 {
1259 	struct lan78xx_net *dev = netdev_priv(netdev);
1260 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1261 	int ret;
1262 
1263 	ret = usb_autopm_get_interface(dev->intf);
1264 	if (ret < 0)
1265 		return ret;
1266 
1267 	pdata->wol = 0;
1268 	if (wol->wolopts & WAKE_UCAST)
1269 		pdata->wol |= WAKE_UCAST;
1270 	if (wol->wolopts & WAKE_MCAST)
1271 		pdata->wol |= WAKE_MCAST;
1272 	if (wol->wolopts & WAKE_BCAST)
1273 		pdata->wol |= WAKE_BCAST;
1274 	if (wol->wolopts & WAKE_MAGIC)
1275 		pdata->wol |= WAKE_MAGIC;
1276 	if (wol->wolopts & WAKE_PHY)
1277 		pdata->wol |= WAKE_PHY;
1278 	if (wol->wolopts & WAKE_ARP)
1279 		pdata->wol |= WAKE_ARP;
1280 
1281 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1282 
1283 	usb_autopm_put_interface(dev->intf);
1284 
1285 	return ret;
1286 }
1287 
1288 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1289 {
1290 	struct lan78xx_net *dev = netdev_priv(net);
1291 	int ret;
1292 	u32 buf;
1293 	u32 adv, lpadv;
1294 
1295 	ret = usb_autopm_get_interface(dev->intf);
1296 	if (ret < 0)
1297 		return ret;
1298 
1299 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1300 	if (buf & MAC_CR_EEE_EN_) {
1301 		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1302 				       PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
1303 		adv = mmd_eee_adv_to_ethtool_adv_t(buf);
1304 		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1305 				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1306 		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1307 
1308 		edata->eee_enabled = true;
1309 		edata->supported = true;
1310 		edata->eee_active = !!(adv & lpadv);
1311 		edata->advertised = adv;
1312 		edata->lp_advertised = lpadv;
1313 		edata->tx_lpi_enabled = true;
1314 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1315 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1316 		edata->tx_lpi_timer = buf;
1317 	} else {
1318 		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1319 				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1320 		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1321 
1322 		edata->eee_enabled = false;
1323 		edata->eee_active = false;
1324 		edata->supported = false;
1325 		edata->advertised = 0;
1326 		edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpadv);
1327 		edata->tx_lpi_enabled = false;
1328 		edata->tx_lpi_timer = 0;
1329 	}
1330 
1331 	usb_autopm_put_interface(dev->intf);
1332 
1333 	return 0;
1334 }
1335 
1336 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1337 {
1338 	struct lan78xx_net *dev = netdev_priv(net);
1339 	int ret;
1340 	u32 buf;
1341 
1342 	ret = usb_autopm_get_interface(dev->intf);
1343 	if (ret < 0)
1344 		return ret;
1345 
1346 	if (edata->eee_enabled) {
1347 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1348 		buf |= MAC_CR_EEE_EN_;
1349 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1350 
1351 		buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
1352 		lan78xx_mmd_write(dev->net, dev->mii.phy_id,
1353 				  PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
1354 	} else {
1355 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1356 		buf &= ~MAC_CR_EEE_EN_;
1357 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1358 	}
1359 
1360 	usb_autopm_put_interface(dev->intf);
1361 
1362 	return 0;
1363 }
1364 
static u32 lan78xx_get_link(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* link state straight from the generic MII helper */
	return mii_link_ok(&dev->mii);
}
1371 
int lan78xx_nway_reset(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* restarting autonegotiation needs working MDIO accessors */
	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
		return -EOPNOTSUPP;

	return mii_nway_restart(&dev->mii);
}
1381 
1382 static void lan78xx_get_drvinfo(struct net_device *net,
1383 				struct ethtool_drvinfo *info)
1384 {
1385 	struct lan78xx_net *dev = netdev_priv(net);
1386 
1387 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1388 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1389 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1390 }
1391 
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* current netif message-level bitmap */
	return dev->msg_enable;
}
1398 
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* replace the netif message-level bitmap wholesale */
	dev->msg_enable = level;
}
1405 
1406 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1407 {
1408 	struct lan78xx_net *dev = netdev_priv(net);
1409 	struct mii_if_info *mii = &dev->mii;
1410 	int ret;
1411 	int buf;
1412 
1413 	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1414 		return -EOPNOTSUPP;
1415 
1416 	ret = usb_autopm_get_interface(dev->intf);
1417 	if (ret < 0)
1418 		return ret;
1419 
1420 	ret = mii_ethtool_gset(&dev->mii, cmd);
1421 
1422 	mii->mdio_write(mii->dev, mii->phy_id,
1423 			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1424 	buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
1425 	mii->mdio_write(mii->dev, mii->phy_id,
1426 			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
1427 
1428 	buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
1429 	if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
1430 		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1431 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1432 	} else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
1433 		cmd->eth_tp_mdix = ETH_TP_MDI;
1434 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1435 	} else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
1436 		cmd->eth_tp_mdix = ETH_TP_MDI_X;
1437 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1438 	}
1439 
1440 	usb_autopm_put_interface(dev->intf);
1441 
1442 	return ret;
1443 }
1444 
1445 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1446 {
1447 	struct lan78xx_net *dev = netdev_priv(net);
1448 	struct mii_if_info *mii = &dev->mii;
1449 	int ret = 0;
1450 	int temp;
1451 
1452 	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1453 		return -EOPNOTSUPP;
1454 
1455 	ret = usb_autopm_get_interface(dev->intf);
1456 	if (ret < 0)
1457 		return ret;
1458 
1459 	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1460 		if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
1461 			mii->mdio_write(mii->dev, mii->phy_id,
1462 					PHY_EXT_GPIO_PAGE,
1463 					PHY_EXT_GPIO_PAGE_SPACE_1);
1464 			temp = mii->mdio_read(mii->dev, mii->phy_id,
1465 					PHY_EXT_MODE_CTRL);
1466 			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1467 			mii->mdio_write(mii->dev, mii->phy_id,
1468 					PHY_EXT_MODE_CTRL,
1469 					temp | PHY_EXT_MODE_CTRL_MDI_);
1470 			mii->mdio_write(mii->dev, mii->phy_id,
1471 					PHY_EXT_GPIO_PAGE,
1472 					PHY_EXT_GPIO_PAGE_SPACE_0);
1473 		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
1474 			mii->mdio_write(mii->dev, mii->phy_id,
1475 					PHY_EXT_GPIO_PAGE,
1476 					PHY_EXT_GPIO_PAGE_SPACE_1);
1477 			temp = mii->mdio_read(mii->dev, mii->phy_id,
1478 					PHY_EXT_MODE_CTRL);
1479 			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1480 			mii->mdio_write(mii->dev, mii->phy_id,
1481 					PHY_EXT_MODE_CTRL,
1482 					temp | PHY_EXT_MODE_CTRL_MDI_X_);
1483 			mii->mdio_write(mii->dev, mii->phy_id,
1484 					PHY_EXT_GPIO_PAGE,
1485 					PHY_EXT_GPIO_PAGE_SPACE_0);
1486 		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
1487 			mii->mdio_write(mii->dev, mii->phy_id,
1488 					PHY_EXT_GPIO_PAGE,
1489 					PHY_EXT_GPIO_PAGE_SPACE_1);
1490 			temp = mii->mdio_read(mii->dev, mii->phy_id,
1491 							PHY_EXT_MODE_CTRL);
1492 			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1493 			mii->mdio_write(mii->dev, mii->phy_id,
1494 					PHY_EXT_MODE_CTRL,
1495 					temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1496 			mii->mdio_write(mii->dev, mii->phy_id,
1497 					PHY_EXT_GPIO_PAGE,
1498 					PHY_EXT_GPIO_PAGE_SPACE_0);
1499 		}
1500 	}
1501 
1502 	/* change speed & duplex */
1503 	ret = mii_ethtool_sset(&dev->mii, cmd);
1504 
1505 	if (!cmd->autoneg) {
1506 		/* force link down */
1507 		temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
1508 		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
1509 				temp | BMCR_LOOPBACK);
1510 		mdelay(1);
1511 		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
1512 	}
1513 
1514 	usb_autopm_put_interface(dev->intf);
1515 
1516 	return ret;
1517 }
1518 
/* ethtool entry points: link, settings, EEPROM, statistics, WoL and EEE */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1538 
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* MII ioctls are serviced only while the interface is up */
	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
1548 
1549 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1550 {
1551 	u32 addr_lo, addr_hi;
1552 	int ret;
1553 	u8 addr[6];
1554 
1555 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1556 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1557 
1558 	addr[0] = addr_lo & 0xFF;
1559 	addr[1] = (addr_lo >> 8) & 0xFF;
1560 	addr[2] = (addr_lo >> 16) & 0xFF;
1561 	addr[3] = (addr_lo >> 24) & 0xFF;
1562 	addr[4] = addr_hi & 0xFF;
1563 	addr[5] = (addr_hi >> 8) & 0xFF;
1564 
1565 	if (!is_valid_ether_addr(addr)) {
1566 		/* reading mac address from EEPROM or OTP */
1567 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1568 					 addr) == 0) ||
1569 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1570 				      addr) == 0)) {
1571 			if (is_valid_ether_addr(addr)) {
1572 				/* eeprom values are valid so use them */
1573 				netif_dbg(dev, ifup, dev->net,
1574 					  "MAC address read from EEPROM");
1575 			} else {
1576 				/* generate random MAC */
1577 				random_ether_addr(addr);
1578 				netif_dbg(dev, ifup, dev->net,
1579 					  "MAC address set to random addr");
1580 			}
1581 
1582 			addr_lo = addr[0] | (addr[1] << 8) |
1583 				  (addr[2] << 16) | (addr[3] << 24);
1584 			addr_hi = addr[4] | (addr[5] << 8);
1585 
1586 			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1587 			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1588 		} else {
1589 			/* generate random MAC */
1590 			random_ether_addr(addr);
1591 			netif_dbg(dev, ifup, dev->net,
1592 				  "MAC address set to random addr");
1593 		}
1594 	}
1595 
1596 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1597 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1598 
1599 	ether_addr_copy(dev->net->dev_addr, addr);
1600 }
1601 
/* Wire up the generic MII accessor structure for the internal PHY. */
static void lan78xx_mii_init(struct lan78xx_net *dev)
{
	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = lan78xx_mdio_read;
	dev->mii.mdio_write = lan78xx_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0x1f;
	dev->mii.phy_id = INTERNAL_PHY_ID;
	dev->mii.supports_gmii = true;
}
1613 
/* One-time PHY setup: advertise all speeds plus pause, select automatic
 * MDI-X, drop the unsupported 1000HD advertisement, and unmask the
 * link-change interrupt.  Returns 0 on success, -EOPNOTSUPP when no MDIO
 * accessors are wired up.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int temp;
	struct mii_if_info *mii = &dev->mii;

	if ((!mii->mdio_write) || (!mii->mdio_read))
		return -EOPNOTSUPP;

	/* advertise every 10/100 mode plus CSMA and both pause types */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
	temp |= ADVERTISE_ALL;
	mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
			temp | ADVERTISE_CSMA |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* set to AUTOMDIX */
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
	temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
	temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
	mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
			temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
	dev->mdix_ctrl = ETH_TP_MDI_AUTO;

	/* MAC doesn't support 1000HD */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
			temp & ~ADVERTISE_1000HALF);

	/* clear interrupt */
	mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
	mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
			PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
			PHY_VTSE_INT_MASK_LINK_CHANGE_);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;
}
1654 
1655 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1656 {
1657 	int ret = 0;
1658 	u32 buf;
1659 	bool rxenabled;
1660 
1661 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1662 
1663 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1664 
1665 	if (rxenabled) {
1666 		buf &= ~MAC_RX_RXEN_;
1667 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1668 	}
1669 
1670 	/* add 4 to size for FCS */
1671 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
1672 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1673 
1674 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
1675 
1676 	if (rxenabled) {
1677 		buf |= MAC_RX_RXEN_;
1678 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1679 	}
1680 
1681 	return 0;
1682 }
1683 
/* Asynchronously unlink every not-yet-unlinked URB on the given queue.
 * Returns the number of URBs for which the unlink was issued.  The queue
 * lock must be dropped around usb_unlink_urb(), so the walk restarts
 * from the head each time and skips entries already in unlink_start.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1728 
/* Change the interface MTU.  Rejects sizes above MAX_SINGLE_PACKET_SIZE,
 * non-positive sizes, and any size whose link-layer length is an exact
 * multiple of the USB packet size (which would require a zero-length
 * packet after every frame).  Grows the RX URB size with the MTU when
 * the URBs were sized to the old hard MTU.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* in-flight URBs use the old, smaller size; recycle
			 * them so new ones are allocated at the new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1763 
/* Set a new MAC address while the interface is down.  Writes the address
 * into the receive-address registers.
 *
 * NOTE(review): unlike lan78xx_init_mac_address(), this does not update
 * MAF_LO(0)/MAF_HI(0); confirm whether the perfect filter also needs
 * reprogramming here.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the six bytes into the two little-endian register images */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
1791 
1792 /* Enable or disable Rx checksum offload engine */
1793 static int lan78xx_set_features(struct net_device *netdev,
1794 				netdev_features_t features)
1795 {
1796 	struct lan78xx_net *dev = netdev_priv(netdev);
1797 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1798 	unsigned long flags;
1799 	int ret;
1800 
1801 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1802 
1803 	if (features & NETIF_F_RXCSUM) {
1804 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1805 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1806 	} else {
1807 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1808 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1809 	}
1810 
1811 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1812 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1813 	else
1814 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1815 
1816 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1817 
1818 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1819 
1820 	return 0;
1821 }
1822 
/* Workqueue handler: push the cached VLAN filter table into the device's
 * dataport.  Runs in process context because the write can block.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1832 
1833 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1834 				   __be16 proto, u16 vid)
1835 {
1836 	struct lan78xx_net *dev = netdev_priv(netdev);
1837 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1838 	u16 vid_bit_index;
1839 	u16 vid_dword_index;
1840 
1841 	vid_dword_index = (vid >> 5) & 0x7F;
1842 	vid_bit_index = vid & 0x1F;
1843 
1844 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1845 
1846 	/* defer register writes to a sleepable context */
1847 	schedule_work(&pdata->set_vlan);
1848 
1849 	return 0;
1850 }
1851 
1852 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1853 				    __be16 proto, u16 vid)
1854 {
1855 	struct lan78xx_net *dev = netdev_priv(netdev);
1856 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1857 	u16 vid_bit_index;
1858 	u16 vid_dword_index;
1859 
1860 	vid_dword_index = (vid >> 5) & 0x7F;
1861 	vid_bit_index = vid & 0x1F;
1862 
1863 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1864 
1865 	/* defer register writes to a sleepable context */
1866 	schedule_work(&pdata->set_vlan);
1867 
1868 	return 0;
1869 }
1870 
/* Program the USB Latency Tolerance Messaging registers.  When LTM is
 * enabled and the EEPROM/OTP carries a 24-byte LTM block (presumably a
 * length byte at offset 0x3F and a word-offset pointer at 0x40 — confirm
 * against the datasheet), those values are used; otherwise all six
 * registers are zeroed.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1909 
/* Full device bring-up: lite reset, MAC address, USB/LTM configuration,
 * burst/FIFO sizing per USB speed, receive-filter defaults, PHY reset
 * and init, then MAC/FCT TX and RX enables.  Returns 0 on success or
 * -EIO when a reset fails to complete within ~1s.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a lite reset and poll for its self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* make room for the 8-byte TX command header on every frame */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue lengths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		/* NOTE(review): the full-speed branch leaves tx_qlen at its
		 * previous value — confirm whether that is intentional.
		 */
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while (buf & PMT_CTL_PHY_RST_);

	lan78xx_mii_init(dev);

	ret = lan78xx_phy_init(dev);

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	buf |= MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable on PHY
	 * NOTE(review): this tests the EEE bit of the value just written,
	 * which is only set if MAC_CR already had it — confirm intent.
	 */
	if (buf & MAC_CR_EEE_EN_)
		lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the MAC and FCT transmit paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the MAC and FCT receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	if (!mii_nway_restart(&dev->mii))
		netif_dbg(dev, link, dev->net, "autoneg initiated");

	return 0;
}
2051 
/* ndo_open: wake the device, reset it, start the interrupt URB for link
 * tracking, and kick an initial link-reset event.  The runtime-PM
 * reference taken here is dropped before returning; lan78xx_stop()
 * drops a further reference (presumably taken elsewhere — see
 * usb_autopm_put_interface() there).
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the first link event to be processed */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2088 
/* Unlink every outstanding RX/TX URB and wait for their completions to
 * drain.  dev->wait is published so completion paths can wake us.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish.
	 * NOTE(review): the '&&' makes this loop exit as soon as ANY of the
	 * three queues is empty, not when all are — confirm whether '||'
	 * was intended.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2114 
/* ndo_stop: quiesce the queue, drain in-flight URBs, kill the interrupt
 * URB, and neutralize deferred work before dropping the PM reference.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2145 
/* Thin wrapper so tx_prep has a driver-named linearization hook. */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2150 
/* Prepend the 8-byte TX command header (TX_CMD_A then TX_CMD_B, both
 * little-endian) to an outgoing frame, reallocating for headroom and
 * linearizing as needed.  Returns the prepared skb, or NULL on failure.
 *
 * NOTE(review): on linearize failure the skb is returned NULL but not
 * freed here — confirm the caller owns/frees it on the NULL path.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* make room for the command words; the original skb is consumed */
	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	if (lan78xx_linearize(skb) < 0)
		return NULL;

	/* frame length plus hardware FCS insertion */
	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push B first so A ends up first in the frame */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2198 
/* Move @skb from its current queue (rxq or txq) onto dev->done and
 * record its new lifecycle state, scheduling the bottom half when the
 * done list transitions from empty.  Runs in URB-completion context.
 * Returns the state the skb had before the move.
 *
 * Locking: IRQ state is saved by the first lock and restored by the
 * last unlock; in between, list->lock is handed off to done.lock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only kick the tasklet on the empty->non-empty edge */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2221 
/* Completion handler for bulk-out (TX) URBs.  Updates TX statistics,
 * triggers endpoint recovery on stall, then hands the skb to the
 * bottom half in tx_done state for freeing.  Interrupt context.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from keventd */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level bus trouble: stop feeding the device */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* balances the autopm get taken when the URB was submitted */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2260 
2261 static void lan78xx_queue_skb(struct sk_buff_head *list,
2262 			      struct sk_buff *newsk, enum skb_state state)
2263 {
2264 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2265 
2266 	__skb_queue_tail(list, newsk);
2267 	entry->state = state;
2268 }
2269 
2270 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2271 {
2272 	struct lan78xx_net *dev = netdev_priv(net);
2273 	struct sk_buff *skb2 = NULL;
2274 
2275 	if (skb) {
2276 		skb_tx_timestamp(skb);
2277 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2278 	}
2279 
2280 	if (skb2) {
2281 		skb_queue_tail(&dev->txq_pend, skb2);
2282 
2283 		if (skb_queue_len(&dev->txq_pend) > 10)
2284 			netif_stop_queue(net);
2285 	} else {
2286 		netif_dbg(dev, tx_err, dev->net,
2287 			  "lan78xx_tx_prep return NULL\n");
2288 		dev->net->stats.tx_errors++;
2289 		dev->net->stats.tx_dropped++;
2290 	}
2291 
2292 	tasklet_schedule(&dev->bh);
2293 
2294 	return NETDEV_TX_OK;
2295 }
2296 
/* Scan the interface's altsettings for one bulk-in, one bulk-out and
 * (optionally) one interrupt-in endpoint; record the bulk pipes and
 * the status endpoint in @dev.  Returns 0 on success or -EINVAL if no
 * altsetting provides both bulk endpoints.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints are usable */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	/* may legitimately remain NULL if no interrupt-in endpoint */
	dev->ep_intr = status;

	return 0;
}
2354 
2355 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2356 {
2357 	struct lan78xx_priv *pdata = NULL;
2358 	int ret;
2359 	int i;
2360 
2361 	ret = lan78xx_get_endpoints(dev, intf);
2362 
2363 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2364 
2365 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2366 	if (!pdata) {
2367 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2368 		return -ENOMEM;
2369 	}
2370 
2371 	pdata->dev = dev;
2372 
2373 	spin_lock_init(&pdata->rfe_ctl_lock);
2374 	mutex_init(&pdata->dataport_mutex);
2375 
2376 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2377 
2378 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2379 		pdata->vlan_table[i] = 0;
2380 
2381 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2382 
2383 	dev->net->features = 0;
2384 
2385 	if (DEFAULT_TX_CSUM_ENABLE)
2386 		dev->net->features |= NETIF_F_HW_CSUM;
2387 
2388 	if (DEFAULT_RX_CSUM_ENABLE)
2389 		dev->net->features |= NETIF_F_RXCSUM;
2390 
2391 	if (DEFAULT_TSO_CSUM_ENABLE)
2392 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2393 
2394 	dev->net->hw_features = dev->net->features;
2395 
2396 	/* Init all registers */
2397 	ret = lan78xx_reset(dev);
2398 
2399 	dev->net->flags |= IFF_MULTICAST;
2400 
2401 	pdata->wol = WAKE_MAGIC;
2402 
2403 	return 0;
2404 }
2405 
2406 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2407 {
2408 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2409 
2410 	if (pdata) {
2411 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2412 		kfree(pdata);
2413 		pdata = NULL;
2414 		dev->data[0] = 0;
2415 	}
2416 }
2417 
2418 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2419 				    struct sk_buff *skb,
2420 				    u32 rx_cmd_a, u32 rx_cmd_b)
2421 {
2422 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2423 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2424 		skb->ip_summed = CHECKSUM_NONE;
2425 	} else {
2426 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2427 		skb->ip_summed = CHECKSUM_COMPLETE;
2428 	}
2429 }
2430 
/* Deliver a received frame to the network stack, or park it on
 * rxq_pause while RX is paused.  Updates RX packet/byte counters.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb still holds our skb_data; wipe it before the stack reuses it */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* deferred delivery if a hardware RX timestamp is pending */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2456 
2457 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2458 {
2459 	if (skb->len < dev->net->hard_header_len)
2460 		return 0;
2461 
2462 	while (skb->len > 0) {
2463 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
2464 		u16 rx_cmd_c;
2465 		struct sk_buff *skb2;
2466 		unsigned char *packet;
2467 
2468 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2469 		le32_to_cpus(&rx_cmd_a);
2470 		skb_pull(skb, sizeof(rx_cmd_a));
2471 
2472 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2473 		le32_to_cpus(&rx_cmd_b);
2474 		skb_pull(skb, sizeof(rx_cmd_b));
2475 
2476 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2477 		le16_to_cpus(&rx_cmd_c);
2478 		skb_pull(skb, sizeof(rx_cmd_c));
2479 
2480 		packet = skb->data;
2481 
2482 		/* get the packet length */
2483 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2484 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2485 
2486 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2487 			netif_dbg(dev, rx_err, dev->net,
2488 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2489 		} else {
2490 			/* last frame in this batch */
2491 			if (skb->len == size) {
2492 				lan78xx_rx_csum_offload(dev, skb,
2493 							rx_cmd_a, rx_cmd_b);
2494 
2495 				skb_trim(skb, skb->len - 4); /* remove fcs */
2496 				skb->truesize = size + sizeof(struct sk_buff);
2497 
2498 				return 1;
2499 			}
2500 
2501 			skb2 = skb_clone(skb, GFP_ATOMIC);
2502 			if (unlikely(!skb2)) {
2503 				netdev_warn(dev->net, "Error allocating skb");
2504 				return 0;
2505 			}
2506 
2507 			skb2->len = size;
2508 			skb2->data = packet;
2509 			skb_set_tail_pointer(skb2, size);
2510 
2511 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2512 
2513 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
2514 			skb2->truesize = size + sizeof(struct sk_buff);
2515 
2516 			lan78xx_skb_return(dev, skb2);
2517 		}
2518 
2519 		skb_pull(skb, size);
2520 
2521 		/* padding bytes before the next frame starts */
2522 		if (skb->len)
2523 			skb_pull(skb, align_count);
2524 	}
2525 
2526 	if (unlikely(skb->len < 0)) {
2527 		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2528 		return 0;
2529 	}
2530 
2531 	return 1;
2532 }
2533 
2534 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2535 {
2536 	if (!lan78xx_rx(dev, skb)) {
2537 		dev->net->stats.rx_errors++;
2538 		goto done;
2539 	}
2540 
2541 	if (skb->len) {
2542 		lan78xx_skb_return(dev, skb);
2543 		return;
2544 	}
2545 
2546 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2547 	dev->net->stats.rx_errors++;
2548 done:
2549 	skb_queue_tail(&dev->done, skb);
2550 }
2551 
2552 static void rx_complete(struct urb *urb);
2553 
/* Allocate an RX skb, bind it to @urb and submit on the bulk-in pipe.
 * Both the skb and the urb are freed on any failure path.  Returns 0
 * on success or a negative errno (-ENOLINK means "stop refilling").
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also closes the race against teardown/suspend */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from keventd */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: retry from the bottom half */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2613 
/* Completion handler for bulk-in (RX) URBs.  Classifies the URB status,
 * hands the skb to the bottom half via defer_bh(), and resubmits the
 * URB when the interface is still running.  Interrupt context.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* shorter than the per-frame metadata: discard */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb attached so the bottom half frees it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* reuse the urb immediately for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2683 
/* Bottom-half TX: coalesce queued frames from txq_pend into a single
 * bulk-out URB (a GSO frame is always sent alone), then submit it.
 * Runs in tasklet context.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* determine how many pending frames fit into one aggregate */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* NOTE(review): skb is the queue head here, so after
			 * the dequeue below skb and skb2 alias the same
			 * buffer; the gso_skb code uses skb — confirm.
			 */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame is placed on a 32-bit boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* hold a runtime-PM reference for the duration of the transfer;
	 * released in tx_complete() or on the error paths below
	 */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		/* skb may be NULL when alloc_skb() above failed */
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2807 
2808 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2809 {
2810 	struct urb *urb;
2811 	int i;
2812 
2813 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2814 		for (i = 0; i < 10; i++) {
2815 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2816 				break;
2817 			urb = usb_alloc_urb(0, GFP_ATOMIC);
2818 			if (urb)
2819 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2820 					return;
2821 		}
2822 
2823 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2824 			tasklet_schedule(&dev->bh);
2825 	}
2826 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2827 		netif_wake_queue(dev->net);
2828 }
2829 
/* Main tasklet: drain dev->done (free completed TX skbs/urbs, deliver
 * received buffers), then kick pending TX and refill the RX ring.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			/* delivers frames; on error re-queues skb to done */
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop draining; NOTE(review) the
			 * dequeued skb is not freed here — confirm intent
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2866 
2867 static void lan78xx_delayedwork(struct work_struct *work)
2868 {
2869 	int status;
2870 	struct lan78xx_net *dev;
2871 
2872 	dev = container_of(work, struct lan78xx_net, wq.work);
2873 
2874 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2875 		unlink_urbs(dev, &dev->txq);
2876 		status = usb_autopm_get_interface(dev->intf);
2877 		if (status < 0)
2878 			goto fail_pipe;
2879 		status = usb_clear_halt(dev->udev, dev->pipe_out);
2880 		usb_autopm_put_interface(dev->intf);
2881 		if (status < 0 &&
2882 		    status != -EPIPE &&
2883 		    status != -ESHUTDOWN) {
2884 			if (netif_msg_tx_err(dev))
2885 fail_pipe:
2886 				netdev_err(dev->net,
2887 					   "can't clear tx halt, status %d\n",
2888 					   status);
2889 		} else {
2890 			clear_bit(EVENT_TX_HALT, &dev->flags);
2891 			if (status != -ESHUTDOWN)
2892 				netif_wake_queue(dev->net);
2893 		}
2894 	}
2895 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2896 		unlink_urbs(dev, &dev->rxq);
2897 		status = usb_autopm_get_interface(dev->intf);
2898 		if (status < 0)
2899 				goto fail_halt;
2900 		status = usb_clear_halt(dev->udev, dev->pipe_in);
2901 		usb_autopm_put_interface(dev->intf);
2902 		if (status < 0 &&
2903 		    status != -EPIPE &&
2904 		    status != -ESHUTDOWN) {
2905 			if (netif_msg_rx_err(dev))
2906 fail_halt:
2907 				netdev_err(dev->net,
2908 					   "can't clear rx halt, status %d\n",
2909 					   status);
2910 		} else {
2911 			clear_bit(EVENT_RX_HALT, &dev->flags);
2912 			tasklet_schedule(&dev->bh);
2913 		}
2914 	}
2915 
2916 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2917 		int ret = 0;
2918 
2919 		clear_bit(EVENT_LINK_RESET, &dev->flags);
2920 		status = usb_autopm_get_interface(dev->intf);
2921 		if (status < 0)
2922 			goto skip_reset;
2923 		if (lan78xx_link_reset(dev) < 0) {
2924 			usb_autopm_put_interface(dev->intf);
2925 skip_reset:
2926 			netdev_info(dev->net, "link reset failed (%d)\n",
2927 				    ret);
2928 		} else {
2929 			usb_autopm_put_interface(dev->intf);
2930 		}
2931 	}
2932 }
2933 
2934 static void intr_complete(struct urb *urb)
2935 {
2936 	struct lan78xx_net *dev = urb->context;
2937 	int status = urb->status;
2938 
2939 	switch (status) {
2940 	/* success */
2941 	case 0:
2942 		lan78xx_status(dev, urb);
2943 		break;
2944 
2945 	/* software-driven interface shutdown */
2946 	case -ENOENT:			/* urb killed */
2947 	case -ESHUTDOWN:		/* hardware gone */
2948 		netif_dbg(dev, ifdown, dev->net,
2949 			  "intr shutdown, code %d\n", status);
2950 		return;
2951 
2952 	/* NOTE:  not throttling like RX/TX, since this endpoint
2953 	 * already polls infrequently
2954 	 */
2955 	default:
2956 		netdev_dbg(dev->net, "intr status %d\n", status);
2957 		break;
2958 	}
2959 
2960 	if (!netif_running(dev->net))
2961 		return;
2962 
2963 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2964 	status = usb_submit_urb(urb, GFP_ATOMIC);
2965 	if (status != 0)
2966 		netif_err(dev, timer, dev->net,
2967 			  "intr resubmit --> %d\n", status);
2968 }
2969 
/* USB disconnect callback: unregister the netdev, stop deferred work,
 * discard TX URBs parked during autosuspend and release all driver
 * resources.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs anchored on dev->deferred while suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* balances usb_get_dev() in probe */
}
2998 
/* ndo_tx_timeout: the netdev watchdog fired — cancel in-flight TX URBs
 * and let the bottom half restart transmission.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3006 
/* net_device operations implemented by this driver. */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3021 
3022 static int lan78xx_probe(struct usb_interface *intf,
3023 			 const struct usb_device_id *id)
3024 {
3025 	struct lan78xx_net *dev;
3026 	struct net_device *netdev;
3027 	struct usb_device *udev;
3028 	int ret;
3029 	unsigned maxp;
3030 	unsigned period;
3031 	u8 *buf = NULL;
3032 
3033 	udev = interface_to_usbdev(intf);
3034 	udev = usb_get_dev(udev);
3035 
3036 	ret = -ENOMEM;
3037 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3038 	if (!netdev) {
3039 			dev_err(&intf->dev, "Error: OOM\n");
3040 			goto out1;
3041 	}
3042 
3043 	/* netdev_printk() needs this */
3044 	SET_NETDEV_DEV(netdev, &intf->dev);
3045 
3046 	dev = netdev_priv(netdev);
3047 	dev->udev = udev;
3048 	dev->intf = intf;
3049 	dev->net = netdev;
3050 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3051 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3052 
3053 	skb_queue_head_init(&dev->rxq);
3054 	skb_queue_head_init(&dev->txq);
3055 	skb_queue_head_init(&dev->done);
3056 	skb_queue_head_init(&dev->rxq_pause);
3057 	skb_queue_head_init(&dev->txq_pend);
3058 	mutex_init(&dev->phy_mutex);
3059 
3060 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3061 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3062 	init_usb_anchor(&dev->deferred);
3063 
3064 	netdev->netdev_ops = &lan78xx_netdev_ops;
3065 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3066 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3067 
3068 	ret = lan78xx_bind(dev, intf);
3069 	if (ret < 0)
3070 		goto out2;
3071 	strcpy(netdev->name, "eth%d");
3072 
3073 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3074 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3075 
3076 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3077 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3078 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3079 
3080 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3081 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3082 
3083 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3084 					dev->ep_intr->desc.bEndpointAddress &
3085 					USB_ENDPOINT_NUMBER_MASK);
3086 	period = dev->ep_intr->desc.bInterval;
3087 
3088 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3089 	buf = kmalloc(maxp, GFP_KERNEL);
3090 	if (buf) {
3091 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3092 		if (!dev->urb_intr) {
3093 			kfree(buf);
3094 			goto out3;
3095 		} else {
3096 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3097 					 dev->pipe_intr, buf, maxp,
3098 					 intr_complete, dev, period);
3099 		}
3100 	}
3101 
3102 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3103 
3104 	/* driver requires remote-wakeup capability during autosuspend. */
3105 	intf->needs_remote_wakeup = 1;
3106 
3107 	ret = register_netdev(netdev);
3108 	if (ret != 0) {
3109 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3110 		goto out2;
3111 	}
3112 
3113 	usb_set_intfdata(intf, dev);
3114 
3115 	ret = device_set_wakeup_enable(&udev->dev, true);
3116 
3117 	 /* Default delay of 2sec has more overhead than advantage.
3118 	  * Set to 10sec as default.
3119 	  */
3120 	pm_runtime_set_autosuspend_delay(&udev->dev,
3121 					 DEFAULT_AUTOSUSPEND_DELAY);
3122 
3123 	return 0;
3124 
3125 out3:
3126 	lan78xx_unbind(dev, intf);
3127 out2:
3128 	free_netdev(netdev);
3129 out1:
3130 	usb_put_dev(udev);
3131 
3132 	return ret;
3133 }
3134 
3135 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3136 {
3137 	const u16 crc16poly = 0x8005;
3138 	int i;
3139 	u16 bit, crc, msb;
3140 	u8 data;
3141 
3142 	crc = 0xFFFF;
3143 	for (i = 0; i < len; i++) {
3144 		data = *buf++;
3145 		for (bit = 0; bit < 8; bit++) {
3146 			msb = crc >> 15;
3147 			crc <<= 1;
3148 
3149 			if (msb ^ (u16)(data & 1)) {
3150 				crc ^= crc16poly;
3151 				crc |= (u16)0x0001U;
3152 			}
3153 			data >>= 1;
3154 		}
3155 	}
3156 
3157 	return crc;
3158 }
3159 
/* Program the wakeup hardware (WUCSR, WUF filters, PMT_CTL) for the
 * WAKE_* bits in @wol: TX is disabled, the requested wake sources are
 * armed, and RX is re-enabled so wake frames can be seen while
 * suspended.  Always returns 0; NOTE(review): every register
 * read/write status lands in ret and is discarded — confirm that
 * ignoring I/O errors here is intentional.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* match patterns: IPv4/IPv6 multicast MAC prefixes, ARP ethertype */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear any previously armed wake sources and latched events */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start from a clean set of wakeup-frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet needs the deeper suspend mode 3 */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7 = match first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000 = bytes 12-13 (the EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames are actually received */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3302 
3303 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3304 {
3305 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3306 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3307 	u32 buf;
3308 	int ret;
3309 	int event;
3310 
3311 	ret = 0;
3312 	event = message.event;
3313 
3314 	if (!dev->suspend_count++) {
3315 		spin_lock_irq(&dev->txq.lock);
3316 		/* don't autosuspend while transmitting */
3317 		if ((skb_queue_len(&dev->txq) ||
3318 		     skb_queue_len(&dev->txq_pend)) &&
3319 			PMSG_IS_AUTO(message)) {
3320 			spin_unlock_irq(&dev->txq.lock);
3321 			ret = -EBUSY;
3322 			goto out;
3323 		} else {
3324 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3325 			spin_unlock_irq(&dev->txq.lock);
3326 		}
3327 
3328 		/* stop TX & RX */
3329 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3330 		buf &= ~MAC_TX_TXEN_;
3331 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3332 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3333 		buf &= ~MAC_RX_RXEN_;
3334 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3335 
3336 		/* empty out the rx and queues */
3337 		netif_device_detach(dev->net);
3338 		lan78xx_terminate_urbs(dev);
3339 		usb_kill_urb(dev->urb_intr);
3340 
3341 		/* reattach */
3342 		netif_device_attach(dev->net);
3343 	}
3344 
3345 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3346 		if (PMSG_IS_AUTO(message)) {
3347 			/* auto suspend (selective suspend) */
3348 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3349 			buf &= ~MAC_TX_TXEN_;
3350 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3351 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3352 			buf &= ~MAC_RX_RXEN_;
3353 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3354 
3355 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3356 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3357 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3358 
3359 			/* set goodframe wakeup */
3360 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3361 
3362 			buf |= WUCSR_RFE_WAKE_EN_;
3363 			buf |= WUCSR_STORE_WAKE_;
3364 
3365 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3366 
3367 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3368 
3369 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3370 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3371 
3372 			buf |= PMT_CTL_PHY_WAKE_EN_;
3373 			buf |= PMT_CTL_WOL_EN_;
3374 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
3375 			buf |= PMT_CTL_SUS_MODE_3_;
3376 
3377 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3378 
3379 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3380 
3381 			buf |= PMT_CTL_WUPS_MASK_;
3382 
3383 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3384 
3385 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3386 			buf |= MAC_RX_RXEN_;
3387 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3388 		} else {
3389 			lan78xx_set_suspend(dev, pdata->wol);
3390 		}
3391 	}
3392 
3393 out:
3394 	return ret;
3395 }
3396 
/* USB PM resume callback, counterpart of lan78xx_suspend().
 *
 * When the outermost resume completes (suspend_count drops back to 0):
 * restart the interrupt URB (if the netdev is open), resubmit the TX
 * URBs parked on the 'deferred' anchor while asleep, clear
 * EVENT_DEV_ASLEEP, and restart the TX queue and bottom-half tasklet.
 *
 * Regardless of nesting level it then disarms the wakeup sources
 * (WUCSR/WUCSR2/WK_SRC), acknowledges latched wake-event flags and
 * re-enables the MAC transmitter.
 *
 * Always returns 0; individual register-access errors are ignored
 * ('ret' is assigned but not checked).
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* txq.lock protects the deferred anchor drain and the
		 * ASLEEP flag against the TX path
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet and release the autopm
				 * reference, presumably taken when the URB
				 * was deferred (not visible here) - verify
				 * against the TX path
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* wake the stack's queue only if there is room */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm all wakeup sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* NOTE(review): these writes look like write-one-to-clear acks of
	 * the latched wake-event status bits - confirm against the LAN78xx
	 * datasheet
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3457 
/* USB PM reset_resume callback.
 *
 * The device may have lost its register state across the suspend, so
 * fully re-initialize the chip with lan78xx_reset() before running the
 * normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *net_dev = usb_get_intfdata(intf);

	lan78xx_reset(net_dev);

	return lan78xx_resume(intf);
}
3465 
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3478 
/* USB driver glue: probe/disconnect and power-management callbacks.
 * Autosuspend is supported; hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3490 
/* register the driver and emit standard module metadata */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
3496