xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision a06c488d)
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
35 
36 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME	"lan78xx"
39 #define DRIVER_VERSION	"1.0.1"
40 
41 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
42 #define THROTTLE_JIFFIES		(HZ / 8)
43 #define UNLINK_TIMEOUT_MS		3
44 
45 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
46 
47 #define SS_USB_PKT_SIZE			(1024)
48 #define HS_USB_PKT_SIZE			(512)
49 #define FS_USB_PKT_SIZE			(64)
50 
51 #define MAX_RX_FIFO_SIZE		(12 * 1024)
52 #define MAX_TX_FIFO_SIZE		(12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY		(0x0800)
55 #define MAX_SINGLE_PACKET_SIZE		(9000)
56 #define DEFAULT_TX_CSUM_ENABLE		(true)
57 #define DEFAULT_RX_CSUM_ENABLE		(true)
58 #define DEFAULT_TSO_CSUM_ENABLE		(true)
59 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
60 #define TX_OVERHEAD			(8)
61 #define RXW_PADDING			2
62 
63 #define LAN78XX_USB_VENDOR_ID		(0x0424)
64 #define LAN7800_USB_PRODUCT_ID		(0x7800)
65 #define LAN7850_USB_PRODUCT_ID		(0x7850)
66 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
67 #define LAN78XX_OTP_MAGIC		(0x78F3)
68 
69 #define	MII_READ			1
70 #define	MII_WRITE			0
71 
72 #define EEPROM_INDICATOR		(0xA5)
73 #define EEPROM_MAC_OFFSET		(0x01)
74 #define MAX_EEPROM_SIZE			512
75 #define OTP_INDICATOR_1			(0xF3)
76 #define OTP_INDICATOR_2			(0xF7)
77 
78 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
79 					 WAKE_MCAST | WAKE_BCAST | \
80 					 WAKE_ARP | WAKE_MAGIC)
81 
82 /* USB related defines */
83 #define BULK_IN_PIPE			1
84 #define BULK_OUT_PIPE			2
85 
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
88 
/* ethtool statistics names; entry order must match the field order of
 * struct lan78xx_statstage, because lan78xx_get_stats() copies the
 * counters out as a flat u32 array.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138 
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS.
 * The device delivers these as little-endian u32 counters; they are
 * byte-swapped in lan78xx_read_stats().  Field order must match
 * lan78xx_gstrings[] above.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188 
189 struct lan78xx_net;
190 
/* Driver-private state hung off lan78xx_net->data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;		/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;	/* deferred filter writes */
	struct work_struct set_vlan;		/* deferred VLAN table writes */
	u32 wol;		/* enabled WAKE_* wake-on-LAN options */
};
203 
/* Lifecycle state of an skb queued on one of the rxq/txq/done lists;
 * stored in skb->cb via struct skb_data.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb's transfer */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its lifecycle */
	size_t length;		/* payload length of the transfer */
};
220 
/* Context carried by asynchronous control transfers. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
225 
226 #define EVENT_TX_HALT			0
227 #define EVENT_RX_HALT			1
228 #define EVENT_RX_MEMORY			2
229 #define EVENT_STS_SPLIT			3
230 #define EVENT_LINK_RESET		4
231 #define EVENT_RX_PAUSED			5
232 #define EVENT_DEV_WAKING		6
233 #define EVENT_DEV_ASLEEP		7
234 #define EVENT_DEV_OPEN			8
235 
/* Per-device driver state; lives in netdev_priv() of the net_device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points at struct lan78xx_priv */

	int			rx_qlen;	/* max in-flight RX URBs */
	int			tx_qlen;	/* max in-flight TX URBs */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;		/* completed, awaiting bh */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;		/* RX/TX completion bottom half */
	struct delayed_work	wq;		/* keventd work, see EVENT_* bits */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;	/* URBs deferred across suspend */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;

	unsigned long		data[5];	/* data[0] = struct lan78xx_priv * */

	int			link_on;	/* cached carrier state */
	u8			mdix_ctrl;

	u32			devid;		/* chip ID/revision from ID_REV */
	struct mii_bus		*mdiobus;
};
284 
285 /* use ethtool to change the level for any given device */
286 static int msg_level = -1;
287 module_param(msg_level, int, 0);
288 MODULE_PARM_DESC(msg_level, "Override default message level");
289 
290 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
291 {
292 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
293 	int ret;
294 
295 	if (!buf)
296 		return -ENOMEM;
297 
298 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
299 			      USB_VENDOR_REQUEST_READ_REGISTER,
300 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
301 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
302 	if (likely(ret >= 0)) {
303 		le32_to_cpus(buf);
304 		*data = *buf;
305 	} else {
306 		netdev_warn(dev->net,
307 			    "Failed to read register index 0x%08x. ret = %d",
308 			    index, ret);
309 	}
310 
311 	kfree(buf);
312 
313 	return ret;
314 }
315 
316 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
317 {
318 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
319 	int ret;
320 
321 	if (!buf)
322 		return -ENOMEM;
323 
324 	*buf = data;
325 	cpu_to_le32s(buf);
326 
327 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
328 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
329 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
330 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
331 	if (unlikely(ret < 0)) {
332 		netdev_warn(dev->net,
333 			    "Failed to write register index 0x%08x. ret = %d",
334 			    index, ret);
335 	}
336 
337 	kfree(buf);
338 
339 	return ret;
340 }
341 
342 static int lan78xx_read_stats(struct lan78xx_net *dev,
343 			      struct lan78xx_statstage *data)
344 {
345 	int ret = 0;
346 	int i;
347 	struct lan78xx_statstage *stats;
348 	u32 *src;
349 	u32 *dst;
350 
351 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
352 	if (!stats)
353 		return -ENOMEM;
354 
355 	ret = usb_control_msg(dev->udev,
356 			      usb_rcvctrlpipe(dev->udev, 0),
357 			      USB_VENDOR_REQUEST_GET_STATS,
358 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
359 			      0,
360 			      0,
361 			      (void *)stats,
362 			      sizeof(*stats),
363 			      USB_CTRL_SET_TIMEOUT);
364 	if (likely(ret >= 0)) {
365 		src = (u32 *)stats;
366 		dst = (u32 *)data;
367 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
368 			le32_to_cpus(&src[i]);
369 			dst[i] = src[i];
370 		}
371 	} else {
372 		netdev_warn(dev->net,
373 			    "Failed to read stat ret = 0x%x", ret);
374 	}
375 
376 	kfree(stats);
377 
378 	return ret;
379 }
380 
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
383 {
384 	unsigned long start_time = jiffies;
385 	u32 val;
386 	int ret;
387 
388 	do {
389 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
390 		if (unlikely(ret < 0))
391 			return -EIO;
392 
393 		if (!(val & MII_ACC_MII_BUSY_))
394 			return 0;
395 	} while (!time_after(jiffies, start_time + HZ));
396 
397 	return -EIO;
398 }
399 
400 static inline u32 mii_access(int id, int index, int read)
401 {
402 	u32 ret;
403 
404 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406 	if (read)
407 		ret |= MII_ACC_MII_READ_;
408 	else
409 		ret |= MII_ACC_MII_WRITE_;
410 	ret |= MII_ACC_MII_BUSY_;
411 
412 	return ret;
413 }
414 
/* Wait for a previously issued EEPROM command to finish.
 * Polls E2P_CMD for up to ~1s; returns 0 on completion, -EIO on a
 * register read failure, controller-reported timeout, or poll timeout.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		/* done, or the controller itself flagged a timeout */
		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* re-check: either exit path may still leave an error condition */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
439 
/* Ensure the EEPROM controller is idle before issuing a new command.
 * Polls E2P_CMD for up to ~1s; returns 0 when idle, -EIO otherwise.
 */
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}
460 
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per READ command.  Returns 0 on success or a negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a single-byte READ at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		/* low byte of E2P_DATA holds the byte just read */
		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}
492 
493 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
494 			       u32 length, u8 *data)
495 {
496 	u8 sig;
497 	int ret;
498 
499 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
500 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
501 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
502 	else
503 		ret = -EINVAL;
504 
505 	return ret;
506 }
507 
/* Write @length bytes from @data to the EEPROM starting at @offset.
 * Enables write/erase mode once, then issues one WRITE command per
 * byte, waiting for each to complete.  Returns 0 or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}
551 
/* Read @length bytes of raw OTP memory starting at @offset.
 * Powers the OTP block up if needed, then reads one byte per
 * READ+GO command sequence, polling OTP_STATUS between bytes.
 * Returns 0 on success, -EIO on poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* OTP address is split across two registers: high bits
		 * in OTP_ADDR1, low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
605 
/* Program @length bytes of @data into raw OTP memory at @offset.
 * Powers the OTP block up if needed, selects BYTE program mode, then
 * programs one byte per PRGVRFY+GO command, polling OTP_STATUS.
 * Returns 0 on success, -EIO on poll timeout.  NOTE: OTP bits can only
 * be programmed once; this is a destructive operation.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* OTP address is split across OTP_ADDR1 (high) and
		 * OTP_ADDR2 (low)
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
658 
659 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
660 			    u32 length, u8 *data)
661 {
662 	u8 sig;
663 	int ret;
664 
665 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
666 
667 	if (ret == 0) {
668 		if (sig == OTP_INDICATOR_1)
669 			offset = offset;
670 		else if (sig == OTP_INDICATOR_2)
671 			offset += 0x100;
672 		else
673 			ret = -EINVAL;
674 		ret = lan78xx_read_raw_otp(dev, offset, length, data);
675 	}
676 
677 	return ret;
678 }
679 
/* Wait for the internal dataport to become ready.
 * Polls DP_SEL up to 100 times with short sleeps; returns 0 when the
 * DPRDY bit is set, -EIO on read failure or timeout.
 */
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}
701 
702 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
703 				  u32 addr, u32 length, u32 *buf)
704 {
705 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
706 	u32 dp_sel;
707 	int i, ret;
708 
709 	if (usb_autopm_get_interface(dev->intf) < 0)
710 			return 0;
711 
712 	mutex_lock(&pdata->dataport_mutex);
713 
714 	ret = lan78xx_dataport_wait_not_busy(dev);
715 	if (ret < 0)
716 		goto done;
717 
718 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
719 
720 	dp_sel &= ~DP_SEL_RSEL_MASK_;
721 	dp_sel |= ram_select;
722 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
723 
724 	for (i = 0; i < length; i++) {
725 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
726 
727 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
728 
729 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
730 
731 		ret = lan78xx_dataport_wait_not_busy(dev);
732 		if (ret < 0)
733 			goto done;
734 	}
735 
736 done:
737 	mutex_unlock(&pdata->dataport_mutex);
738 	usb_autopm_put_interface(dev->intf);
739 
740 	return ret;
741 }
742 
743 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
744 				    int index, u8 addr[ETH_ALEN])
745 {
746 	u32	temp;
747 
748 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
749 		temp = addr[3];
750 		temp = addr[2] | (temp << 8);
751 		temp = addr[1] | (temp << 8);
752 		temp = addr[0] | (temp << 8);
753 		pdata->pfilter_table[index][1] = temp;
754 		temp = addr[5];
755 		temp = addr[4] | (temp << 8);
756 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
757 		pdata->pfilter_table[index][0] = temp;
758 	}
759 }
760 
761 /* returns hash bit number for given MAC address */
762 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
763 {
764 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
765 }
766 
/* Workqueue handler: push the shadow multicast hash table, perfect
 * filter table, and RFE_CTL value (built under the rfe_ctl_lock in
 * lan78xx_set_multicast()) out to the hardware.  Runs in a sleepable
 * context because the register writes use synchronous USB transfers.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds our own HW address; invalidate each slot (HI=0)
	 * before loading its LO/HI words
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
791 
792 static void lan78xx_set_multicast(struct net_device *netdev)
793 {
794 	struct lan78xx_net *dev = netdev_priv(netdev);
795 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
796 	unsigned long flags;
797 	int i;
798 
799 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
800 
801 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
802 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
803 
804 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
805 			pdata->mchash_table[i] = 0;
806 	/* pfilter_table[0] has own HW address */
807 	for (i = 1; i < NUM_OF_MAF; i++) {
808 			pdata->pfilter_table[i][0] =
809 			pdata->pfilter_table[i][1] = 0;
810 	}
811 
812 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
813 
814 	if (dev->net->flags & IFF_PROMISC) {
815 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
816 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
817 	} else {
818 		if (dev->net->flags & IFF_ALLMULTI) {
819 			netif_dbg(dev, drv, dev->net,
820 				  "receive all multicast enabled");
821 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
822 		}
823 	}
824 
825 	if (netdev_mc_count(dev->net)) {
826 		struct netdev_hw_addr *ha;
827 		int i;
828 
829 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
830 
831 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
832 
833 		i = 1;
834 		netdev_for_each_mc_addr(ha, netdev) {
835 			/* set first 32 into Perfect Filter */
836 			if (i < 33) {
837 				lan78xx_set_addr_filter(pdata, i, ha->addr);
838 			} else {
839 				u32 bitnum = lan78xx_hash(ha->addr);
840 
841 				pdata->mchash_table[bitnum / 32] |=
842 							(1 << (bitnum % 32));
843 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
844 			}
845 			i++;
846 		}
847 	}
848 
849 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
850 
851 	/* defer register writes to a sleepable context */
852 	schedule_work(&pdata->set_multicast);
853 }
854 
/* Program MAC flow control from the resolved pause capabilities of the
 * local/remote autonegotiation advertisements.  The FCT_FLOW threshold
 * depends on the USB bus speed and must be written before FLOW enables
 * pause.  Always returns 0; register write failures are not propagated.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);	/* max pause time */

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds tuned per USB speed; values taken from vendor
	 * configuration — TODO confirm against datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
885 
/* Handle a PHY link-state change (EVENT_LINK_RESET).
 * Clears the pending PHY and chip interrupt status, then:
 *  - on link down: marks carrier off and resets the MAC;
 *  - on link up: tunes USB U1/U2 link power states for the negotiated
 *    speed (SuperSpeed only), reprograms flow control from the
 *    advertisement registers, and marks carrier on.
 * Returns a negative errno on register access failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* re-read to clear any interrupt latched meanwhile */
		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}
961 
962 /* some work can't be done in tasklets, so we use keventd
963  *
964  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
965  * but tasklet_schedule() doesn't.	hope the failure is rare.
966  */
/* Record a pending EVENT_* in dev->flags and kick the keventd work.
 * If the work is already scheduled the new request merges with it; the
 * error path only notes that the immediate scheduling was declined.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
973 
974 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
975 {
976 	u32 intdata;
977 
978 	if (urb->actual_length != 4) {
979 		netdev_warn(dev->net,
980 			    "unexpected urb length %d", urb->actual_length);
981 		return;
982 	}
983 
984 	memcpy(&intdata, urb->transfer_buffer, 4);
985 	le32_to_cpus(&intdata);
986 
987 	if (intdata & INT_ENP_PHY_INT) {
988 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
989 			  lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
990 	} else
991 		netdev_warn(dev->net,
992 			    "unexpected interrupt: 0x%08x\n", intdata);
993 }
994 
/* ethtool get_eeprom_len: report the fixed EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
999 
/* ethtool get_eeprom: raw read of the requested range; no signature
 * validation, so an unprovisioned EEPROM can still be dumped.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}
1009 
1010 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1011 				      struct ethtool_eeprom *ee, u8 *data)
1012 {
1013 	struct lan78xx_net *dev = netdev_priv(netdev);
1014 
1015 	/* Allow entire eeprom update only */
1016 	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1017 	    (ee->offset == 0) &&
1018 	    (ee->len == 512) &&
1019 	    (data[0] == EEPROM_INDICATOR))
1020 		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1021 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1022 		 (ee->offset == 0) &&
1023 		 (ee->len == 512) &&
1024 		 (data[0] == OTP_INDICATOR_1))
1025 		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1026 
1027 	return -EINVAL;
1028 }
1029 
1030 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1031 				u8 *data)
1032 {
1033 	if (stringset == ETH_SS_STATS)
1034 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1035 }
1036 
1037 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1038 {
1039 	if (sset == ETH_SS_STATS)
1040 		return ARRAY_SIZE(lan78xx_gstrings);
1041 	else
1042 		return -EOPNOTSUPP;
1043 }
1044 
/* ethtool get_ethtool_stats: wake the device, fetch the hardware
 * counter block, and widen each u32 counter into the u64 output array.
 * On any failure the output is simply left untouched (ethtool contract
 * has no error return here).
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_statstage lan78xx_stat;
	u32 *p;
	int i;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* read_stats returns the transferred byte count when it worked */
	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
		p = (u32 *)&lan78xx_stat;
		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
			data[i] = p[i];
	}

	usb_autopm_put_interface(dev->intf);
}
1064 
1065 static void lan78xx_get_wol(struct net_device *netdev,
1066 			    struct ethtool_wolinfo *wol)
1067 {
1068 	struct lan78xx_net *dev = netdev_priv(netdev);
1069 	int ret;
1070 	u32 buf;
1071 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1072 
1073 	if (usb_autopm_get_interface(dev->intf) < 0)
1074 			return;
1075 
1076 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1077 	if (unlikely(ret < 0)) {
1078 		wol->supported = 0;
1079 		wol->wolopts = 0;
1080 	} else {
1081 		if (buf & USB_CFG_RMT_WKP_) {
1082 			wol->supported = WAKE_ALL;
1083 			wol->wolopts = pdata->wol;
1084 		} else {
1085 			wol->supported = 0;
1086 			wol->wolopts = 0;
1087 		}
1088 	}
1089 
1090 	usb_autopm_put_interface(dev->intf);
1091 }
1092 
1093 static int lan78xx_set_wol(struct net_device *netdev,
1094 			   struct ethtool_wolinfo *wol)
1095 {
1096 	struct lan78xx_net *dev = netdev_priv(netdev);
1097 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1098 	int ret;
1099 
1100 	ret = usb_autopm_get_interface(dev->intf);
1101 	if (ret < 0)
1102 		return ret;
1103 
1104 	pdata->wol = 0;
1105 	if (wol->wolopts & WAKE_UCAST)
1106 		pdata->wol |= WAKE_UCAST;
1107 	if (wol->wolopts & WAKE_MCAST)
1108 		pdata->wol |= WAKE_MCAST;
1109 	if (wol->wolopts & WAKE_BCAST)
1110 		pdata->wol |= WAKE_BCAST;
1111 	if (wol->wolopts & WAKE_MAGIC)
1112 		pdata->wol |= WAKE_MAGIC;
1113 	if (wol->wolopts & WAKE_PHY)
1114 		pdata->wol |= WAKE_PHY;
1115 	if (wol->wolopts & WAKE_ARP)
1116 		pdata->wol |= WAKE_ARP;
1117 
1118 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1119 
1120 	phy_ethtool_set_wol(netdev->phydev, wol);
1121 
1122 	usb_autopm_put_interface(dev->intf);
1123 
1124 	return ret;
1125 }
1126 
/* ethtool get_eee: combine PHY-level EEE advertisement data with the
 * MAC's EEE enable bit and LPI request delay register.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only when both sides advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1164 
1165 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1166 {
1167 	struct lan78xx_net *dev = netdev_priv(net);
1168 	int ret;
1169 	u32 buf;
1170 
1171 	ret = usb_autopm_get_interface(dev->intf);
1172 	if (ret < 0)
1173 		return ret;
1174 
1175 	if (edata->eee_enabled) {
1176 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1177 		buf |= MAC_CR_EEE_EN_;
1178 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1179 
1180 		phy_ethtool_set_eee(net->phydev, edata);
1181 
1182 		buf = (u32)edata->tx_lpi_timer;
1183 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1184 	} else {
1185 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1186 		buf &= ~MAC_CR_EEE_EN_;
1187 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1188 	}
1189 
1190 	usb_autopm_put_interface(dev->intf);
1191 
1192 	return 0;
1193 }
1194 
1195 static u32 lan78xx_get_link(struct net_device *net)
1196 {
1197 	phy_read_status(net->phydev);
1198 
1199 	return net->phydev->link;
1200 }
1201 
1202 int lan78xx_nway_reset(struct net_device *net)
1203 {
1204 	return phy_start_aneg(net->phydev);
1205 }
1206 
1207 static void lan78xx_get_drvinfo(struct net_device *net,
1208 				struct ethtool_drvinfo *info)
1209 {
1210 	struct lan78xx_net *dev = netdev_priv(net);
1211 
1212 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1213 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1214 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1215 }
1216 
1217 static u32 lan78xx_get_msglevel(struct net_device *net)
1218 {
1219 	struct lan78xx_net *dev = netdev_priv(net);
1220 
1221 	return dev->msg_enable;
1222 }
1223 
1224 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1225 {
1226 	struct lan78xx_net *dev = netdev_priv(net);
1227 
1228 	dev->msg_enable = level;
1229 }
1230 
1231 static int lan78xx_get_mdix_status(struct net_device *net)
1232 {
1233 	struct phy_device *phydev = net->phydev;
1234 	int buf;
1235 
1236 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1237 	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1238 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1239 
1240 	return buf;
1241 }
1242 
1243 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1244 {
1245 	struct lan78xx_net *dev = netdev_priv(net);
1246 	struct phy_device *phydev = net->phydev;
1247 	int buf;
1248 
1249 	if (mdix_ctrl == ETH_TP_MDI) {
1250 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1251 			  LAN88XX_EXT_PAGE_SPACE_1);
1252 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1253 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1254 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1255 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1256 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1257 			  LAN88XX_EXT_PAGE_SPACE_0);
1258 	} else if (mdix_ctrl == ETH_TP_MDI_X) {
1259 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1260 			  LAN88XX_EXT_PAGE_SPACE_1);
1261 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1262 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1263 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1264 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1265 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1266 			  LAN88XX_EXT_PAGE_SPACE_0);
1267 	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1268 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1269 			  LAN88XX_EXT_PAGE_SPACE_1);
1270 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1271 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1272 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1273 			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1274 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1275 			  LAN88XX_EXT_PAGE_SPACE_0);
1276 	}
1277 	dev->mdix_ctrl = mdix_ctrl;
1278 }
1279 
1280 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1281 {
1282 	struct lan78xx_net *dev = netdev_priv(net);
1283 	struct phy_device *phydev = net->phydev;
1284 	int ret;
1285 	int buf;
1286 
1287 	ret = usb_autopm_get_interface(dev->intf);
1288 	if (ret < 0)
1289 		return ret;
1290 
1291 	ret = phy_ethtool_gset(phydev, cmd);
1292 
1293 	buf = lan78xx_get_mdix_status(net);
1294 
1295 	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1296 	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1297 		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1298 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1299 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1300 		cmd->eth_tp_mdix = ETH_TP_MDI;
1301 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1302 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1303 		cmd->eth_tp_mdix = ETH_TP_MDI_X;
1304 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1305 	}
1306 
1307 	usb_autopm_put_interface(dev->intf);
1308 
1309 	return ret;
1310 }
1311 
/* ethtool set_settings: apply MDI-X mode and speed/duplex settings,
 * holding a USB autopm reference for the duration of the PHY accesses.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* Reprogram the PHY's MDI/MDI-X mode only when it changed. */
	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		/* NOTE(review): BMCR_LOOPBACK is toggled for 1 ms,
		 * presumably to bounce the link so the partner re-syncs
		 * with the newly forced speed/duplex -- confirm against
		 * the PHY datasheet.
		 */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1342 
/* ethtool operations table; installed on the net_device at setup time. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1362 
1363 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1364 {
1365 	if (!netif_running(netdev))
1366 		return -EINVAL;
1367 
1368 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1369 }
1370 
1371 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1372 {
1373 	u32 addr_lo, addr_hi;
1374 	int ret;
1375 	u8 addr[6];
1376 
1377 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1378 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1379 
1380 	addr[0] = addr_lo & 0xFF;
1381 	addr[1] = (addr_lo >> 8) & 0xFF;
1382 	addr[2] = (addr_lo >> 16) & 0xFF;
1383 	addr[3] = (addr_lo >> 24) & 0xFF;
1384 	addr[4] = addr_hi & 0xFF;
1385 	addr[5] = (addr_hi >> 8) & 0xFF;
1386 
1387 	if (!is_valid_ether_addr(addr)) {
1388 		/* reading mac address from EEPROM or OTP */
1389 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1390 					 addr) == 0) ||
1391 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1392 				      addr) == 0)) {
1393 			if (is_valid_ether_addr(addr)) {
1394 				/* eeprom values are valid so use them */
1395 				netif_dbg(dev, ifup, dev->net,
1396 					  "MAC address read from EEPROM");
1397 			} else {
1398 				/* generate random MAC */
1399 				random_ether_addr(addr);
1400 				netif_dbg(dev, ifup, dev->net,
1401 					  "MAC address set to random addr");
1402 			}
1403 
1404 			addr_lo = addr[0] | (addr[1] << 8) |
1405 				  (addr[2] << 16) | (addr[3] << 24);
1406 			addr_hi = addr[4] | (addr[5] << 8);
1407 
1408 			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1409 			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1410 		} else {
1411 			/* generate random MAC */
1412 			random_ether_addr(addr);
1413 			netif_dbg(dev, ifup, dev->net,
1414 				  "MAC address set to random addr");
1415 		}
1416 	}
1417 
1418 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1419 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1420 
1421 	ether_addr_copy(dev->net->dev_addr, addr);
1422 }
1423 
1424 /* MDIO read and write wrappers for phylib */
/* Read one 16-bit PHY register through the chip's internal MII master.
 * Serialized by dev->phy_mutex; holds a USB autopm reference so the
 * device stays resumed during the register accesses.
 * Returns the register value (0..0xFFFF) or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	/* NOTE(review): the lan78xx_write_reg result is ignored here; a
	 * failed write would go unnoticed until the data read below.
	 */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the MII transaction to complete */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* only the low 16 bits carry the PHY register contents */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1459 
1460 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1461 				 u16 regval)
1462 {
1463 	struct lan78xx_net *dev = bus->priv;
1464 	u32 val, addr;
1465 	int ret;
1466 
1467 	ret = usb_autopm_get_interface(dev->intf);
1468 	if (ret < 0)
1469 		return ret;
1470 
1471 	mutex_lock(&dev->phy_mutex);
1472 
1473 	/* confirm MII not busy */
1474 	ret = lan78xx_phy_wait_not_busy(dev);
1475 	if (ret < 0)
1476 		goto done;
1477 
1478 	val = (u32)regval;
1479 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1480 
1481 	/* set the address, index & direction (write to PHY) */
1482 	addr = mii_access(phy_id, idx, MII_WRITE);
1483 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1484 
1485 	ret = lan78xx_phy_wait_not_busy(dev);
1486 	if (ret < 0)
1487 		goto done;
1488 
1489 done:
1490 	mutex_unlock(&dev->phy_mutex);
1491 	usb_autopm_put_interface(dev->intf);
1492 	return 0;
1493 }
1494 
/* Allocate and register the MDIO bus that fronts the chip's internal
 * MII master.  Returns 0 on success or a negative errno; the bus is
 * freed again on registration failure.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;
	int i;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* Bus id is derived from the USB topology so it is stable per port. */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	/* handle our own interrupt */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;

	/* Chip IDs 0x7800/0x7850 (presumably LAN7800/LAN7850 -- confirm
	 * against the datasheet) use the internal PHY at address 1; mask
	 * out every other address so scanning only probes that PHY.
	 */
	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
	case 0x78000000:
	case 0x78500000:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1538 
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1544 
/* phylib link-change callback; intentionally a no-op.  Link handling is
 * done elsewhere in this driver (lan78xx_open submits an interrupt URB
 * and defers EVENT_LINK_RESET work).
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1549 
1550 static int lan78xx_phy_init(struct lan78xx_net *dev)
1551 {
1552 	int ret;
1553 	struct phy_device *phydev = dev->net->phydev;
1554 
1555 	phydev = phy_find_first(dev->mdiobus);
1556 	if (!phydev) {
1557 		netdev_err(dev->net, "no PHY found\n");
1558 		return -EIO;
1559 	}
1560 
1561 	ret = phy_connect_direct(dev->net, phydev,
1562 				 lan78xx_link_status_change,
1563 				 PHY_INTERFACE_MODE_GMII);
1564 	if (ret) {
1565 		netdev_err(dev->net, "can't attach PHY to %s\n",
1566 			   dev->mdiobus->id);
1567 		return -EIO;
1568 	}
1569 
1570 	/* set to AUTOMDIX */
1571 	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1572 
1573 	/* MAC doesn't support 1000T Half */
1574 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
1575 	phydev->supported |= (SUPPORTED_10baseT_Half |
1576 			      SUPPORTED_10baseT_Full |
1577 			      SUPPORTED_100baseT_Half |
1578 			      SUPPORTED_100baseT_Full |
1579 			      SUPPORTED_1000baseT_Full |
1580 			      SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1581 	genphy_config_aneg(phydev);
1582 
1583 	/* Workaround to enable PHY interrupt.
1584 	 * phy_start_interrupts() is API for requesting and enabling
1585 	 * PHY interrupt. However, USB-to-Ethernet device can't use
1586 	 * request_irq() called in phy_start_interrupts().
1587 	 * Set PHY to PHY_HALTED and call phy_start()
1588 	 * to make a call to phy_enable_interrupts()
1589 	 */
1590 	phy_stop(phydev);
1591 	phy_start(phydev);
1592 
1593 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1594 
1595 	return 0;
1596 }
1597 
1598 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1599 {
1600 	int ret = 0;
1601 	u32 buf;
1602 	bool rxenabled;
1603 
1604 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1605 
1606 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1607 
1608 	if (rxenabled) {
1609 		buf &= ~MAC_RX_RXEN_;
1610 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1611 	}
1612 
1613 	/* add 4 to size for FCS */
1614 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
1615 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1616 
1617 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
1618 
1619 	if (rxenabled) {
1620 		buf |= MAC_RX_RXEN_;
1621 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1622 	}
1623 
1624 	return 0;
1625 }
1626 
/* Cancel every in-flight URB on the given skb queue that is not already
 * being unlinked.  Returns the number of URBs successfully scheduled for
 * unlinking.  The queue lock must be dropped around usb_unlink_urb()
 * (it may complete synchronously and take the lock in the completion
 * path), so the scan restarts from the head after every unlink.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1671 
/* ndo_change_mtu: validate and apply a new MTU, reprogramming the
 * chip's maximum RX frame length and growing the RX URB size when the
 * hard MTU was driving it.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	/* NOTE(review): the return value of the frame-length update is
	 * ignored; a USB failure here leaves the chip and netdev->mtu
	 * out of sync.
	 */
	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* Grow rx_urb_size with the hard MTU only when it was previously
	 * tracking it; larger URBs mean in-flight RX URBs must be relinked.
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1706 
1707 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1708 {
1709 	struct lan78xx_net *dev = netdev_priv(netdev);
1710 	struct sockaddr *addr = p;
1711 	u32 addr_lo, addr_hi;
1712 	int ret;
1713 
1714 	if (netif_running(netdev))
1715 		return -EBUSY;
1716 
1717 	if (!is_valid_ether_addr(addr->sa_data))
1718 		return -EADDRNOTAVAIL;
1719 
1720 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
1721 
1722 	addr_lo = netdev->dev_addr[0] |
1723 		  netdev->dev_addr[1] << 8 |
1724 		  netdev->dev_addr[2] << 16 |
1725 		  netdev->dev_addr[3] << 24;
1726 	addr_hi = netdev->dev_addr[4] |
1727 		  netdev->dev_addr[5] << 8;
1728 
1729 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1730 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1731 
1732 	return 0;
1733 }
1734 
/* ndo_set_features: enable/disable RX checksum offload and VLAN
 * filtering.  The cached RFE_CTL value is updated under rfe_ctl_lock;
 * the register itself is written after the lock is dropped (USB register
 * writes cannot run in atomic context -- presumably the reason for the
 * split; confirm).
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* NOTE(review): the write result is ignored; a USB failure leaves
	 * the cached rfe_ctl out of sync with the hardware.
	 */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
1765 
1766 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1767 {
1768 	struct lan78xx_priv *pdata =
1769 			container_of(param, struct lan78xx_priv, set_vlan);
1770 	struct lan78xx_net *dev = pdata->dev;
1771 
1772 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1773 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1774 }
1775 
1776 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1777 				   __be16 proto, u16 vid)
1778 {
1779 	struct lan78xx_net *dev = netdev_priv(netdev);
1780 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1781 	u16 vid_bit_index;
1782 	u16 vid_dword_index;
1783 
1784 	vid_dword_index = (vid >> 5) & 0x7F;
1785 	vid_bit_index = vid & 0x1F;
1786 
1787 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1788 
1789 	/* defer register writes to a sleepable context */
1790 	schedule_work(&pdata->set_vlan);
1791 
1792 	return 0;
1793 }
1794 
1795 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1796 				    __be16 proto, u16 vid)
1797 {
1798 	struct lan78xx_net *dev = netdev_priv(netdev);
1799 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1800 	u16 vid_bit_index;
1801 	u16 vid_dword_index;
1802 
1803 	vid_dword_index = (vid >> 5) & 0x7F;
1804 	vid_bit_index = vid & 0x1F;
1805 
1806 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1807 
1808 	/* defer register writes to a sleepable context */
1809 	schedule_work(&pdata->set_vlan);
1810 
1811 	return 0;
1812 }
1813 
/* Program the USB Latency Tolerance Messaging (LTM) registers.  The six
 * values default to zero unless LTM is enabled in USB_CFG1 and a 24-byte
 * parameter block is advertised at EEPROM/OTP offset 0x3F; temp[0] is
 * the block length, temp[1] its word offset (offsets presumably per the
 * Microchip EEPROM layout -- confirm against the datasheet).
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				/* on read failure keep the zero defaults */
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1852 
/* Full device bring-up: lite-reset the chip, restore the MAC address,
 * configure USB/FIFO/burst parameters for the negotiated bus speed,
 * reset the PHY, and finally enable the TX and RX paths.
 * Returns 0 on success, -EIO on reset/PHY-ready timeout.
 * NOTE(review): most register-access return codes are deliberately
 * unchecked here (ret is repeatedly overwritten).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a "lite" reset and poll (up to 1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* the reset cleared the address registers; reprogram them */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths depend on the enumerated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple-ethernet-frames-per-URB aggregation */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll (up to 1s) until the PHY reset clears and the chip is ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* let the MAC follow the PHY's negotiated speed/duplex */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable MAC transmitter, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC receiver, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1984 
/* ndo_open: reset and reconfigure the chip, attach the PHY, submit the
 * interrupt (link-status) URB, and start the TX queue.  Link bring-up
 * itself is deferred via EVENT_LINK_RESET.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* assume no link until the deferred link-reset work says otherwise */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2025 
/* Unlink all TX and RX URBs and wait briefly for their completions to
 * drain.  dev->wait is published so completion paths can wake us.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): with '&&' the loop exits as soon as ANY of
	 * rxq/txq/done is empty; waiting until all are drained would
	 * need '||'.  Left as-is -- confirm intent.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2051 
/* ndo_stop: detach the PHY, stop the queue, cancel all in-flight URBs
 * and deferred work, then release the autopm reference taken at open.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop and detach the PHY first so no more link events arrive */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2086 
/* Thin wrapper over skb_linearize(); used by lan78xx_tx_prep() to make
 * the frame contiguous before the TX command words are prepended.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2091 
/* Prepare an skb for transmission: ensure TX_OVERHEAD bytes of headroom
 * (copying the skb if needed), linearize it, then prepend the two
 * little-endian 32-bit TX command words (A: length/FCS/checksum/LSO
 * flags; B: MSS and VLAN tag).  Returns the ready skb, or NULL on
 * failure (the original skb has then been freed).
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		/* reallocate with enough headroom for the command words */
		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	/* NOTE(review): on linearize failure the skb is leaked -- it is
	 * neither freed here nor returned to the caller.
	 */
	if (lan78xx_linearize(skb) < 0)
		return NULL;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* prepend command word B, then A, so A ends up first on the wire */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2139 
/* Move an skb from its active queue to dev->done (tagged with the new
 * state) and kick the bottom-half tasklet when the done list was empty.
 * Interrupts stay disabled across the hand-off: the first lock is taken
 * with irqsave, swapped for the done-list lock, and flags are restored
 * only on the final unlock.  Returns the skb's previous state.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the tasklet only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2162 
/* URB completion handler for bulk-out transfers.  Updates TX stats,
 * reacts to the error class (halt recovery, shutdown, link trouble),
 * releases the async autopm reference taken at submit time, and hands
 * the skb to the bottom half for final cleanup.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: defer a halt-clear to process
			 * context
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* transfer-level USB errors: stop feeding the device */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* async variant: completion handlers run in interrupt context */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2201 
2202 static void lan78xx_queue_skb(struct sk_buff_head *list,
2203 			      struct sk_buff *newsk, enum skb_state state)
2204 {
2205 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2206 
2207 	__skb_queue_tail(list, newsk);
2208 	entry->state = state;
2209 }
2210 
2211 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2212 {
2213 	struct lan78xx_net *dev = netdev_priv(net);
2214 	struct sk_buff *skb2 = NULL;
2215 
2216 	if (skb) {
2217 		skb_tx_timestamp(skb);
2218 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2219 	}
2220 
2221 	if (skb2) {
2222 		skb_queue_tail(&dev->txq_pend, skb2);
2223 
2224 		if (skb_queue_len(&dev->txq_pend) > 10)
2225 			netif_stop_queue(net);
2226 	} else {
2227 		netif_dbg(dev, tx_err, dev->net,
2228 			  "lan78xx_tx_prep return NULL\n");
2229 		dev->net->stats.tx_errors++;
2230 		dev->net->stats.tx_dropped++;
2231 	}
2232 
2233 	tasklet_schedule(&dev->bh);
2234 
2235 	return NETDEV_TX_OK;
2236 }
2237 
/* Scan the interface altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, then derive the driver's bulk
 * pipes from the endpoints found.
 *
 * Returns 0 on success or -EINVAL when no altsetting provides both
 * bulk endpoints. The interrupt endpoint is optional and is stored in
 * dev->ep_intr (possibly NULL).
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* first match of each kind wins */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2295 
2296 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2297 {
2298 	struct lan78xx_priv *pdata = NULL;
2299 	int ret;
2300 	int i;
2301 
2302 	ret = lan78xx_get_endpoints(dev, intf);
2303 
2304 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2305 
2306 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2307 	if (!pdata) {
2308 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2309 		return -ENOMEM;
2310 	}
2311 
2312 	pdata->dev = dev;
2313 
2314 	spin_lock_init(&pdata->rfe_ctl_lock);
2315 	mutex_init(&pdata->dataport_mutex);
2316 
2317 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2318 
2319 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2320 		pdata->vlan_table[i] = 0;
2321 
2322 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2323 
2324 	dev->net->features = 0;
2325 
2326 	if (DEFAULT_TX_CSUM_ENABLE)
2327 		dev->net->features |= NETIF_F_HW_CSUM;
2328 
2329 	if (DEFAULT_RX_CSUM_ENABLE)
2330 		dev->net->features |= NETIF_F_RXCSUM;
2331 
2332 	if (DEFAULT_TSO_CSUM_ENABLE)
2333 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2334 
2335 	dev->net->hw_features = dev->net->features;
2336 
2337 	/* Init all registers */
2338 	ret = lan78xx_reset(dev);
2339 
2340 	lan78xx_mdio_init(dev);
2341 
2342 	dev->net->flags |= IFF_MULTICAST;
2343 
2344 	pdata->wol = WAKE_MAGIC;
2345 
2346 	return 0;
2347 }
2348 
2349 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2350 {
2351 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2352 
2353 	lan78xx_remove_mdio(dev);
2354 
2355 	if (pdata) {
2356 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2357 		kfree(pdata);
2358 		pdata = NULL;
2359 		dev->data[0] = 0;
2360 	}
2361 }
2362 
2363 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2364 				    struct sk_buff *skb,
2365 				    u32 rx_cmd_a, u32 rx_cmd_b)
2366 {
2367 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2368 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2369 		skb->ip_summed = CHECKSUM_NONE;
2370 	} else {
2371 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2372 		skb->ip_summed = CHECKSUM_COMPLETE;
2373 	}
2374 }
2375 
/* Deliver a received frame to the network stack, or park it on
 * rxq_pause while RX is paused. Consumes @skb either way.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb still holds skb_data from the URB path; scrub it before the
	 * stack reuses the control buffer
	 */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2401 
/* Parse one bulk-in buffer, which may contain several frames, each
 * prefixed by the little-endian RX command words (rx_cmd_a/b/c) and
 * padded to a 4-byte boundary.
 *
 * Interior frames are handed to the stack as clones; the final frame
 * is left in place in @skb for the caller (rx_process) to return.
 *
 * Returns 1 on success or 0 on a short buffer / clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* strip the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error bit set: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* interior frame: clone the head skb and point the
			 * clone at just this packet's bytes
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2473 
2474 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2475 {
2476 	if (!lan78xx_rx(dev, skb)) {
2477 		dev->net->stats.rx_errors++;
2478 		goto done;
2479 	}
2480 
2481 	if (skb->len) {
2482 		lan78xx_skb_return(dev, skb);
2483 		return;
2484 	}
2485 
2486 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2487 	dev->net->stats.rx_errors++;
2488 done:
2489 	skb_queue_tail(&dev->done, skb);
2490 }
2491 
2492 static void rx_complete(struct urb *urb);
2493 
/* Allocate an RX skb, bind it to @urb and submit it on the bulk-in
 * pipe under rxq.lock. Takes ownership of @urb: on any failure both
 * the URB and the skb are freed here.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOLINK when
 * the device is not ready for RX, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, up, and neither
	 * halted nor asleep
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2553 
/* Completion handler for bulk-in (RX) URBs; runs in USB completion
 * context.
 *
 * Classifies the completion status into an skb_state, hands the skb
 * to the bh tasklet via defer_bh(), and resubmits the same URB when
 * possible. On fatal statuses the URB is re-attached to the skb
 * (entry->urb) so the tasklet frees both together.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt transfer: recycle instead of processing */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* resubmit unless the device stopped or an unlink raced */
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2623 
/* Bottom-half TX path, called from the bh tasklet: coalesce pending
 * skbs from txq_pend into one bulk-out transfer (each packet placed at
 * a 32-bit aligned offset), or take a single GSO skb uncopied, then
 * build and submit the URB.
 *
 * NOTE: the drop label sits inside the final if-block so both the
 * allocation-failure gotos and the submit-failure path share the
 * accounting/cleanup code.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* decide how many pending skbs fit into one aggregate transfer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* a GSO skb is sent on its own, without copying */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle when the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2747 
2748 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2749 {
2750 	struct urb *urb;
2751 	int i;
2752 
2753 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2754 		for (i = 0; i < 10; i++) {
2755 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2756 				break;
2757 			urb = usb_alloc_urb(0, GFP_ATOMIC);
2758 			if (urb)
2759 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2760 					return;
2761 		}
2762 
2763 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2764 			tasklet_schedule(&dev->bh);
2765 	}
2766 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2767 		netif_wake_queue(dev->net);
2768 }
2769 
/* Tasklet handler: drain the done list — processing completed RX
 * buffers and freeing finished TX/RX resources — then refill TX and RX
 * work while the device is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* rx_process may requeue the skb on dev->done,
			 * which we then handle as rx_cleanup below
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2806 
/* Deferred (process-context) event handler scheduled by
 * lan78xx_defer_kevent(): clears stalled bulk endpoints and performs
 * link resets.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels sit inside
 * if/else arms so the "autopm failed" paths jump straight to the error
 * message shared with the "clear_halt failed" paths; control flow is
 * preserved byte-for-byte here.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2873 
2874 static void intr_complete(struct urb *urb)
2875 {
2876 	struct lan78xx_net *dev = urb->context;
2877 	int status = urb->status;
2878 
2879 	switch (status) {
2880 	/* success */
2881 	case 0:
2882 		lan78xx_status(dev, urb);
2883 		break;
2884 
2885 	/* software-driven interface shutdown */
2886 	case -ENOENT:			/* urb killed */
2887 	case -ESHUTDOWN:		/* hardware gone */
2888 		netif_dbg(dev, ifdown, dev->net,
2889 			  "intr shutdown, code %d\n", status);
2890 		return;
2891 
2892 	/* NOTE:  not throttling like RX/TX, since this endpoint
2893 	 * already polls infrequently
2894 	 */
2895 	default:
2896 		netdev_dbg(dev->net, "intr status %d\n", status);
2897 		break;
2898 	}
2899 
2900 	if (!netif_running(dev->net))
2901 		return;
2902 
2903 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2904 	status = usb_submit_urb(urb, GFP_ATOMIC);
2905 	if (status != 0)
2906 		netif_err(dev, timer, dev->net,
2907 			  "intr resubmit --> %d\n", status);
2908 }
2909 
/* USB disconnect callback: tear down in roughly the reverse order of
 * probe. The intfdata pointer is cleared first so this runs at most
 * once per interface.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX URBs that were deferred while suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
2938 
2939 void lan78xx_tx_timeout(struct net_device *net)
2940 {
2941 	struct lan78xx_net *dev = netdev_priv(net);
2942 
2943 	unlink_urbs(dev, &dev->txq);
2944 	tasklet_schedule(&dev->bh);
2945 }
2946 
/* net_device_ops for the lan78xx netdev; the handlers are defined
 * earlier in this file.
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
2961 
2962 static int lan78xx_probe(struct usb_interface *intf,
2963 			 const struct usb_device_id *id)
2964 {
2965 	struct lan78xx_net *dev;
2966 	struct net_device *netdev;
2967 	struct usb_device *udev;
2968 	int ret;
2969 	unsigned maxp;
2970 	unsigned period;
2971 	u8 *buf = NULL;
2972 
2973 	udev = interface_to_usbdev(intf);
2974 	udev = usb_get_dev(udev);
2975 
2976 	ret = -ENOMEM;
2977 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2978 	if (!netdev) {
2979 			dev_err(&intf->dev, "Error: OOM\n");
2980 			goto out1;
2981 	}
2982 
2983 	/* netdev_printk() needs this */
2984 	SET_NETDEV_DEV(netdev, &intf->dev);
2985 
2986 	dev = netdev_priv(netdev);
2987 	dev->udev = udev;
2988 	dev->intf = intf;
2989 	dev->net = netdev;
2990 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2991 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
2992 
2993 	skb_queue_head_init(&dev->rxq);
2994 	skb_queue_head_init(&dev->txq);
2995 	skb_queue_head_init(&dev->done);
2996 	skb_queue_head_init(&dev->rxq_pause);
2997 	skb_queue_head_init(&dev->txq_pend);
2998 	mutex_init(&dev->phy_mutex);
2999 
3000 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3001 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3002 	init_usb_anchor(&dev->deferred);
3003 
3004 	netdev->netdev_ops = &lan78xx_netdev_ops;
3005 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3006 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3007 
3008 	ret = lan78xx_bind(dev, intf);
3009 	if (ret < 0)
3010 		goto out2;
3011 	strcpy(netdev->name, "eth%d");
3012 
3013 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3014 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3015 
3016 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3017 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3018 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3019 
3020 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3021 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3022 
3023 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3024 					dev->ep_intr->desc.bEndpointAddress &
3025 					USB_ENDPOINT_NUMBER_MASK);
3026 	period = dev->ep_intr->desc.bInterval;
3027 
3028 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3029 	buf = kmalloc(maxp, GFP_KERNEL);
3030 	if (buf) {
3031 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3032 		if (!dev->urb_intr) {
3033 			kfree(buf);
3034 			goto out3;
3035 		} else {
3036 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3037 					 dev->pipe_intr, buf, maxp,
3038 					 intr_complete, dev, period);
3039 		}
3040 	}
3041 
3042 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3043 
3044 	/* driver requires remote-wakeup capability during autosuspend. */
3045 	intf->needs_remote_wakeup = 1;
3046 
3047 	ret = register_netdev(netdev);
3048 	if (ret != 0) {
3049 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3050 		goto out2;
3051 	}
3052 
3053 	usb_set_intfdata(intf, dev);
3054 
3055 	ret = device_set_wakeup_enable(&udev->dev, true);
3056 
3057 	 /* Default delay of 2sec has more overhead than advantage.
3058 	  * Set to 10sec as default.
3059 	  */
3060 	pm_runtime_set_autosuspend_delay(&udev->dev,
3061 					 DEFAULT_AUTOSUSPEND_DELAY);
3062 
3063 	return 0;
3064 
3065 out3:
3066 	lan78xx_unbind(dev, intf);
3067 out2:
3068 	free_netdev(netdev);
3069 out1:
3070 	usb_put_dev(udev);
3071 
3072 	return ret;
3073 }
3074 
/* Bitwise CRC-16 (polynomial 0x8005, initial value 0xFFFF), consuming
 * each data byte LSB-first through an MSB-first shift register; used
 * to program the wake-up-frame filter CRCs.
 *
 * Note: the polynomial's low bit already sets bit 0 on feedback, so
 * the original's extra "crc |= 1" was a no-op and is folded away.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	u16 crc = 0xFFFF;
	int i;

	for (i = 0; i < len; i++) {
		u8 data = buf[i];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			u16 feedback = (crc >> 15) ^ (u16)(data & 1);

			crc <<= 1;
			if (feedback)
				crc ^= crc16poly;
			data >>= 1;
		}
	}

	return crc;
}
3099 
/* Program the chip's Wake-on-LAN machinery for a (non-auto) system
 * suspend according to the @wol mode mask: stop TX/RX, clear stale
 * wake status, fill the wake-up frame filters (WUF_CFG/WUF_MASK) for
 * the requested multicast/ARP patterns, then select the suspend mode
 * in PMT_CTL and re-enable RX so wake frames can be seen.
 *
 * Always returns 0; individual register access results are not
 * checked (kernel-style best effort on the suspend path).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while reconfiguring */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control/status before programming new filters */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames reach the filters */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3242 
/* USB suspend callback. On the first suspend of the device: refuse an
 * autosuspend while TX work is pending (-EBUSY), otherwise mark the
 * device asleep, stop the MAC, detach the netdev and kill all URBs.
 * Then program wake-up: good-frame wake for autosuspend, or the
 * user's WoL configuration (pdata->wol) for a system suspend.
 *
 * Returns 0 on success or -EBUSY when autosuspend is rejected.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake control/status */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear any pending wake-up status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake frames can be received */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honor the configured WoL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3336 
/* Power-management resume callback.
 *
 * When the last nested suspend is undone (suspend_count drops to 0):
 * resubmit the interrupt URB, resubmit any TX URBs that were parked
 * on the 'deferred' anchor while asleep, clear EVENT_DEV_ASLEEP and
 * restart the TX queue.  Afterwards the wakeup status registers are
 * cleared/re-armed and the MAC transmitter is re-enabled.
 *
 * NOTE(review): always returns 0; errors from usb_submit_urb() and
 * the register writes are captured in 'ret' but never propagated --
 * confirm this is intentional.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* resubmit TX URBs deferred while the device was asleep;
		 * txq.lock serializes against the transmit path
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* submission failed: drop the packet and
				 * release the autopm reference taken at
				 * transmit time
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* restart the queue unless the TX ring is full */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wakeup control/status left over from suspend */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* acknowledge (write-1-to-clear style) recorded wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3397 
/* Resume after the device was reset or lost power: re-run the full
 * chip reset and PHY bring-up, then perform the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3408 
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3421 
/* USB driver operations table; suspend/resume enable runtime PM
 * (autosuspend) in addition to system sleep support.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3433 
/* Standard module registration boilerplate */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
3439