xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision addee42a)
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy.h>
40 #include "lan78xx.h"
41 
42 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
43 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
44 #define DRIVER_NAME	"lan78xx"
45 #define DRIVER_VERSION	"1.0.6"
46 
47 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
48 #define THROTTLE_JIFFIES		(HZ / 8)
49 #define UNLINK_TIMEOUT_MS		3
50 
51 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
52 
53 #define SS_USB_PKT_SIZE			(1024)
54 #define HS_USB_PKT_SIZE			(512)
55 #define FS_USB_PKT_SIZE			(64)
56 
57 #define MAX_RX_FIFO_SIZE		(12 * 1024)
58 #define MAX_TX_FIFO_SIZE		(12 * 1024)
59 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
60 #define DEFAULT_BULK_IN_DELAY		(0x0800)
61 #define MAX_SINGLE_PACKET_SIZE		(9000)
62 #define DEFAULT_TX_CSUM_ENABLE		(true)
63 #define DEFAULT_RX_CSUM_ENABLE		(true)
64 #define DEFAULT_TSO_CSUM_ENABLE		(true)
65 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
66 #define TX_OVERHEAD			(8)
67 #define RXW_PADDING			2
68 
69 #define LAN78XX_USB_VENDOR_ID		(0x0424)
70 #define LAN7800_USB_PRODUCT_ID		(0x7800)
71 #define LAN7850_USB_PRODUCT_ID		(0x7850)
72 #define LAN7801_USB_PRODUCT_ID		(0x7801)
73 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
74 #define LAN78XX_OTP_MAGIC		(0x78F3)
75 
76 #define	MII_READ			1
77 #define	MII_WRITE			0
78 
79 #define EEPROM_INDICATOR		(0xA5)
80 #define EEPROM_MAC_OFFSET		(0x01)
81 #define MAX_EEPROM_SIZE			512
82 #define OTP_INDICATOR_1			(0xF3)
83 #define OTP_INDICATOR_2			(0xF7)
84 
85 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
86 					 WAKE_MCAST | WAKE_BCAST | \
87 					 WAKE_ARP | WAKE_MAGIC)
88 
89 /* USB related defines */
90 #define BULK_IN_PIPE			1
91 #define BULK_OUT_PIPE			2
92 
93 /* default autosuspend delay (mSec)*/
94 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
95 
96 /* statistic update interval (mSec) */
97 #define STAT_UPDATE_TIMER		(1 * 1000)
98 
99 /* defines interrupts from interrupt EP */
100 #define MAX_INT_EP			(32)
101 #define INT_EP_INTEP			(31)
102 #define INT_EP_OTP_WR_DONE		(28)
103 #define INT_EP_EEE_TX_LPI_START		(26)
104 #define INT_EP_EEE_TX_LPI_STOP		(25)
105 #define INT_EP_EEE_RX_LPI		(24)
106 #define INT_EP_MAC_RESET_TIMEOUT	(23)
107 #define INT_EP_RDFO			(22)
108 #define INT_EP_TXE			(21)
109 #define INT_EP_USB_STATUS		(20)
110 #define INT_EP_TX_DIS			(19)
111 #define INT_EP_RX_DIS			(18)
112 #define INT_EP_PHY			(17)
113 #define INT_EP_DP			(16)
114 #define INT_EP_MAC_ERR			(15)
115 #define INT_EP_TDFU			(14)
116 #define INT_EP_TDFO			(13)
117 #define INT_EP_UTX			(12)
118 #define INT_EP_GPIO_11			(11)
119 #define INT_EP_GPIO_10			(10)
120 #define INT_EP_GPIO_9			(9)
121 #define INT_EP_GPIO_8			(8)
122 #define INT_EP_GPIO_7			(7)
123 #define INT_EP_GPIO_6			(6)
124 #define INT_EP_GPIO_5			(5)
125 #define INT_EP_GPIO_4			(4)
126 #define INT_EP_GPIO_3			(3)
127 #define INT_EP_GPIO_2			(2)
128 #define INT_EP_GPIO_1			(1)
129 #define INT_EP_GPIO_0			(0)
130 
/* ethtool statistics names; the order appears to mirror the counter order in
 * struct lan78xx_statstage / lan78xx_statstage64 — confirm against the
 * ethtool get_strings/get_ethtool_stats callbacks (outside this chunk).
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
180 
/* Raw hardware statistics block as returned by the vendor request
 * USB_VENDOR_REQUEST_GET_STATS (see lan78xx_read_stats).  Counters arrive
 * little-endian and are byte-swapped in place.  The field order must not
 * change: lan78xx_update_stats() copies this struct positionally as a flat
 * u32 array into struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
230 
/* 64-bit accumulated statistics (hardware counter value plus rollover
 * compensation, computed in lan78xx_update_stats).  Must mirror
 * struct lan78xx_statstage field-for-field, as both are indexed as
 * flat arrays with the same element index.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
280 
281 struct lan78xx_net;
282 
/* Driver-private state hung off lan78xx_net->data[0]: receive-filter
 * configuration shadow and the deferred work that pushes it to hardware.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* see lan78xx_deferred_multicast_write */
	struct work_struct set_vlan;
	u32 wol;			/* Wake-on-LAN option bits */
};
295 
/* Lifecycle state of an skb as it moves through the tx/rx URB queues;
 * stored in struct skb_data (skb->cb).
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
305 
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB this skb is attached to */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current queue/lifecycle state */
	size_t length;
	int num_of_packet;	/* packets batched into this transfer */
};
313 
/* Pairs a USB control request with its device; presumably the context for
 * asynchronous/deferred control transfers — no user visible in this chunk,
 * confirm against the rest of the file.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
318 
319 #define EVENT_TX_HALT			0
320 #define EVENT_RX_HALT			1
321 #define EVENT_RX_MEMORY			2
322 #define EVENT_STS_SPLIT			3
323 #define EVENT_LINK_RESET		4
324 #define EVENT_RX_PAUSED			5
325 #define EVENT_DEV_WAKING		6
326 #define EVENT_DEV_ASLEEP		7
327 #define EVENT_DEV_OPEN			8
328 #define EVENT_STAT_UPDATE		9
329 
/* Statistics bookkeeping: last raw hardware snapshot, per-counter rollover
 * counts/limits, and the accumulated 64-bit totals (see
 * lan78xx_update_stats / lan78xx_check_stat_rollover).
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
337 
/* State for the IRQ domain that fans out the device's interrupt-EP status
 * bits (e.g. the PHY interrupt) as virtual interrupts.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq mapped for the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* enabled-source bitmask */
	struct mutex		irq_lock;		/* for irq bus access */
};
346 
/* Per-device driver state, kept in netdev_priv() of the net_device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points to struct lan78xx_priv */

	int			rx_qlen;	/* rx URB queue depth */
	int			tx_qlen;	/* tx URB queue depth */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];	/* data[0] holds lan78xx_priv * */

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV_CHIP_ID_78xx_ */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control from autoneg? */
	u8			fc_request_control;	/* manual FLOW_CTRL_* */

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
406 
407 /* define external phy id */
408 #define	PHY_LAN8835			(0x0007C130)
409 #define	PHY_KSZ9031RNX			(0x00221620)
410 
411 /* use ethtool to change the level for any given device */
412 static int msg_level = -1;
413 module_param(msg_level, int, 0);
414 MODULE_PARM_DESC(msg_level, "Override default message level");
415 
416 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
417 {
418 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
419 	int ret;
420 
421 	if (!buf)
422 		return -ENOMEM;
423 
424 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
425 			      USB_VENDOR_REQUEST_READ_REGISTER,
426 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
427 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
428 	if (likely(ret >= 0)) {
429 		le32_to_cpus(buf);
430 		*data = *buf;
431 	} else {
432 		netdev_warn(dev->net,
433 			    "Failed to read register index 0x%08x. ret = %d",
434 			    index, ret);
435 	}
436 
437 	kfree(buf);
438 
439 	return ret;
440 }
441 
442 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
443 {
444 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445 	int ret;
446 
447 	if (!buf)
448 		return -ENOMEM;
449 
450 	*buf = data;
451 	cpu_to_le32s(buf);
452 
453 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
454 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
455 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
456 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
457 	if (unlikely(ret < 0)) {
458 		netdev_warn(dev->net,
459 			    "Failed to write register index 0x%08x. ret = %d",
460 			    index, ret);
461 	}
462 
463 	kfree(buf);
464 
465 	return ret;
466 }
467 
468 static int lan78xx_read_stats(struct lan78xx_net *dev,
469 			      struct lan78xx_statstage *data)
470 {
471 	int ret = 0;
472 	int i;
473 	struct lan78xx_statstage *stats;
474 	u32 *src;
475 	u32 *dst;
476 
477 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
478 	if (!stats)
479 		return -ENOMEM;
480 
481 	ret = usb_control_msg(dev->udev,
482 			      usb_rcvctrlpipe(dev->udev, 0),
483 			      USB_VENDOR_REQUEST_GET_STATS,
484 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
485 			      0,
486 			      0,
487 			      (void *)stats,
488 			      sizeof(*stats),
489 			      USB_CTRL_SET_TIMEOUT);
490 	if (likely(ret >= 0)) {
491 		src = (u32 *)stats;
492 		dst = (u32 *)data;
493 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
494 			le32_to_cpus(&src[i]);
495 			dst[i] = src[i];
496 		}
497 	} else {
498 		netdev_warn(dev->net,
499 			    "Failed to read stat ret = 0x%x", ret);
500 	}
501 
502 	kfree(stats);
503 
504 	return ret;
505 }
506 
/* Bump the per-counter rollover count when the freshly-read hardware value
 * is smaller than the previously saved one (i.e. the 32-bit counter wrapped).
 * do { } while (0) makes the macro behave as a single statement in all
 * contexts (if/else without braces); arguments are parenthesized to keep
 * expansion safe.  `member` is a field name and cannot be parenthesized.
 */
#define check_counter_rollover(struct1, dev_stats, member)	\
	do {							\
		if ((struct1)->member < (dev_stats).saved.member) \
			(dev_stats).rollover_count.member++;	\
	} while (0)
511 
/* Compare a fresh hardware statistics snapshot against the previously saved
 * one and count 32-bit counter wrap-arounds per field, then save the new
 * snapshot as the baseline for the next pass.  Caller holds
 * dev->stats.access_lock (see lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* new snapshot becomes the baseline for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
565 
/* Refresh the accumulated 64-bit statistics from hardware.
 *
 * Reads a raw snapshot, updates the per-counter rollover counts, then
 * recomputes each 64-bit total as: raw + rollovers * (max + 1).
 *
 * The three lan78xx_statstage structs and lan78xx_statstage64 are traversed
 * as flat u32/u64 arrays at the same index, so their field order must match
 * exactly.  Silently returns if the device cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* view the structs as parallel arrays (positional correspondence) */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* NOTE: on a failed read, lan78xx_stats is uninitialized here and the
	 * totals below are recomputed from garbage — pre-existing behavior.
	 */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
593 
594 /* Loop until the read is completed with timeout called with phy_mutex held */
595 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
596 {
597 	unsigned long start_time = jiffies;
598 	u32 val;
599 	int ret;
600 
601 	do {
602 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
603 		if (unlikely(ret < 0))
604 			return -EIO;
605 
606 		if (!(val & MII_ACC_MII_BUSY_))
607 			return 0;
608 	} while (!time_after(jiffies, start_time + HZ));
609 
610 	return -EIO;
611 }
612 
613 static inline u32 mii_access(int id, int index, int read)
614 {
615 	u32 ret;
616 
617 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
618 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
619 	if (read)
620 		ret |= MII_ACC_MII_READ_;
621 	else
622 		ret |= MII_ACC_MII_WRITE_;
623 	ret |= MII_ACC_MII_BUSY_;
624 
625 	return ret;
626 }
627 
628 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
629 {
630 	unsigned long start_time = jiffies;
631 	u32 val;
632 	int ret;
633 
634 	do {
635 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
636 		if (unlikely(ret < 0))
637 			return -EIO;
638 
639 		if (!(val & E2P_CMD_EPC_BUSY_) ||
640 		    (val & E2P_CMD_EPC_TIMEOUT_))
641 			break;
642 		usleep_range(40, 100);
643 	} while (!time_after(jiffies, start_time + HZ));
644 
645 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
646 		netdev_warn(dev->net, "EEPROM read operation timeout");
647 		return -EIO;
648 	}
649 
650 	return 0;
651 }
652 
653 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
654 {
655 	unsigned long start_time = jiffies;
656 	u32 val;
657 	int ret;
658 
659 	do {
660 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661 		if (unlikely(ret < 0))
662 			return -EIO;
663 
664 		if (!(val & E2P_CMD_EPC_BUSY_))
665 			return 0;
666 
667 		usleep_range(40, 100);
668 	} while (!time_after(jiffies, start_time + HZ));
669 
670 	netdev_warn(dev->net, "EEPROM is busy");
671 	return -EIO;
672 }
673 
674 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
675 				   u32 length, u8 *data)
676 {
677 	u32 val;
678 	u32 saved;
679 	int i, ret;
680 	int retval;
681 
682 	/* depends on chip, some EEPROM pins are muxed with LED function.
683 	 * disable & restore LED function to access EEPROM.
684 	 */
685 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
686 	saved = val;
687 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
688 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
689 		ret = lan78xx_write_reg(dev, HW_CFG, val);
690 	}
691 
692 	retval = lan78xx_eeprom_confirm_not_busy(dev);
693 	if (retval)
694 		return retval;
695 
696 	for (i = 0; i < length; i++) {
697 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
698 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
699 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
700 		if (unlikely(ret < 0)) {
701 			retval = -EIO;
702 			goto exit;
703 		}
704 
705 		retval = lan78xx_wait_eeprom(dev);
706 		if (retval < 0)
707 			goto exit;
708 
709 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
710 		if (unlikely(ret < 0)) {
711 			retval = -EIO;
712 			goto exit;
713 		}
714 
715 		data[i] = val & 0xFF;
716 		offset++;
717 	}
718 
719 	retval = 0;
720 exit:
721 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
722 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
723 
724 	return retval;
725 }
726 
727 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
728 			       u32 length, u8 *data)
729 {
730 	u8 sig;
731 	int ret;
732 
733 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
734 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
735 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
736 	else
737 		ret = -EINVAL;
738 
739 	return ret;
740 }
741 
/* Write `length` bytes to the EEPROM starting at `offset`: enable
 * write/erase once, then issue one E2P WRITE command per byte, waiting for
 * completion after each.  Returns 0 on success, negative errno on failure.
 * The saved HW_CFG (LED enables) is restored on every exit path.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* each byte write must complete before the next */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore LED configuration saved at entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
808 
/* Read `length` bytes from the OTP memory at `offset`: power the OTP block
 * up if needed, then issue one addressed READ command per byte, polling
 * OTP_STATUS for completion each time.  Returns 0 on success, -EIO on
 * timeout.  NOTE: the `ret` value of the individual register accesses is
 * assigned but never checked — pre-existing behavior left as-is.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers (high/low bits) */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* kick off a single-byte read */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
862 
/* Program `length` bytes into the OTP memory at `offset`: power the OTP
 * block up if needed, select BYTE program mode, then program one byte at a
 * time, polling OTP_STATUS for completion.  Returns 0 on success, -EIO on
 * timeout.  OTP bits are one-time programmable — callers are responsible
 * for offset/contents.  Register-access `ret` values are assigned but not
 * checked — pre-existing behavior left as-is.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address is split across two registers (high/low bits) */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		/* program-and-verify command (per register name) — confirm
		 * against the LAN78xx datasheet
		 */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
915 
916 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
917 			    u32 length, u8 *data)
918 {
919 	u8 sig;
920 	int ret;
921 
922 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923 
924 	if (ret == 0) {
925 		if (sig == OTP_INDICATOR_1)
926 			offset = offset;
927 		else if (sig == OTP_INDICATOR_2)
928 			offset += 0x100;
929 		else
930 			ret = -EINVAL;
931 		if (!ret)
932 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
933 	}
934 
935 	return ret;
936 }
937 
938 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
939 {
940 	int i, ret;
941 
942 	for (i = 0; i < 100; i++) {
943 		u32 dp_sel;
944 
945 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
946 		if (unlikely(ret < 0))
947 			return -EIO;
948 
949 		if (dp_sel & DP_SEL_DPRDY_)
950 			return 0;
951 
952 		usleep_range(40, 100);
953 	}
954 
955 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
956 
957 	return -EIO;
958 }
959 
960 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
961 				  u32 addr, u32 length, u32 *buf)
962 {
963 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
964 	u32 dp_sel;
965 	int i, ret;
966 
967 	if (usb_autopm_get_interface(dev->intf) < 0)
968 			return 0;
969 
970 	mutex_lock(&pdata->dataport_mutex);
971 
972 	ret = lan78xx_dataport_wait_not_busy(dev);
973 	if (ret < 0)
974 		goto done;
975 
976 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
977 
978 	dp_sel &= ~DP_SEL_RSEL_MASK_;
979 	dp_sel |= ram_select;
980 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
981 
982 	for (i = 0; i < length; i++) {
983 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
984 
985 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
986 
987 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
988 
989 		ret = lan78xx_dataport_wait_not_busy(dev);
990 		if (ret < 0)
991 			goto done;
992 	}
993 
994 done:
995 	mutex_unlock(&pdata->dataport_mutex);
996 	usb_autopm_put_interface(dev->intf);
997 
998 	return ret;
999 }
1000 
1001 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1002 				    int index, u8 addr[ETH_ALEN])
1003 {
1004 	u32	temp;
1005 
1006 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1007 		temp = addr[3];
1008 		temp = addr[2] | (temp << 8);
1009 		temp = addr[1] | (temp << 8);
1010 		temp = addr[0] | (temp << 8);
1011 		pdata->pfilter_table[index][1] = temp;
1012 		temp = addr[5];
1013 		temp = addr[4] | (temp << 8);
1014 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1015 		pdata->pfilter_table[index][0] = temp;
1016 	}
1017 }
1018 
/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	/* top 9 bits of the Ethernet CRC index one of 512 hash-table bits */
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
1024 
/* Work handler (pdata->set_multicast): push the shadow receive-filter state
 * built by lan78xx_set_multicast to hardware.  Runs in process context
 * because the register/dataport writes sleep.  Write errors are ignored
 * (best-effort) — `ret` is assigned but never checked.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* upload the multicast hash table into the VLAN/DA filter RAM */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* entry 0 (own MAC) untouched; HI is cleared first, then LO/HI
	 * written — presumably to avoid a half-updated entry matching
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1049 
1050 static void lan78xx_set_multicast(struct net_device *netdev)
1051 {
1052 	struct lan78xx_net *dev = netdev_priv(netdev);
1053 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1054 	unsigned long flags;
1055 	int i;
1056 
1057 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1058 
1059 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1060 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1061 
1062 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1063 			pdata->mchash_table[i] = 0;
1064 	/* pfilter_table[0] has own HW address */
1065 	for (i = 1; i < NUM_OF_MAF; i++) {
1066 			pdata->pfilter_table[i][0] =
1067 			pdata->pfilter_table[i][1] = 0;
1068 	}
1069 
1070 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1071 
1072 	if (dev->net->flags & IFF_PROMISC) {
1073 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1074 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1075 	} else {
1076 		if (dev->net->flags & IFF_ALLMULTI) {
1077 			netif_dbg(dev, drv, dev->net,
1078 				  "receive all multicast enabled");
1079 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1080 		}
1081 	}
1082 
1083 	if (netdev_mc_count(dev->net)) {
1084 		struct netdev_hw_addr *ha;
1085 		int i;
1086 
1087 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1088 
1089 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1090 
1091 		i = 1;
1092 		netdev_for_each_mc_addr(ha, netdev) {
1093 			/* set first 32 into Perfect Filter */
1094 			if (i < 33) {
1095 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1096 			} else {
1097 				u32 bitnum = lan78xx_hash(ha->addr);
1098 
1099 				pdata->mchash_table[bitnum / 32] |=
1100 							(1 << (bitnum % 32));
1101 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1102 			}
1103 			i++;
1104 		}
1105 	}
1106 
1107 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1108 
1109 	/* defer register writes to a sleepable context */
1110 	schedule_work(&pdata->set_multicast);
1111 }
1112 
1113 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1114 				      u16 lcladv, u16 rmtadv)
1115 {
1116 	u32 flow = 0, fct_flow = 0;
1117 	int ret;
1118 	u8 cap;
1119 
1120 	if (dev->fc_autoneg)
1121 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1122 	else
1123 		cap = dev->fc_request_control;
1124 
1125 	if (cap & FLOW_CTRL_TX)
1126 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1127 
1128 	if (cap & FLOW_CTRL_RX)
1129 		flow |= FLOW_CR_RX_FCEN_;
1130 
1131 	if (dev->udev->speed == USB_SPEED_SUPER)
1132 		fct_flow = 0x817;
1133 	else if (dev->udev->speed == USB_SPEED_HIGH)
1134 		fct_flow = 0x211;
1135 
1136 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1137 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1138 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1139 
1140 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1141 
1142 	/* threshold value should be set before enabling flow */
1143 	ret = lan78xx_write_reg(dev, FLOW, flow);
1144 
1145 	return 0;
1146 }
1147 
/* PHY interrupt handler body: acknowledge the interrupt and bring the
 * MAC in line with the new PHY link state.  On link-down the MAC is
 * reset and the statistics timer stopped; on link-up flow control and
 * USB3 power states are reconfigured and the timer restarted.
 * Returns 0 or a negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link just went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no link: stop periodic statistics collection */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link just came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			/* NOTE(review): U1/U2 low-power state choice looks
			 * tied to line-rate latency tolerance - confirm
			 * against the LAN78xx datasheet.
			 */
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* local and partner advertisements for flow control */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
1223 
/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's already queued, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
1229 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1230 {
1231 	set_bit(work, &dev->flags);
1232 	if (!schedule_delayed_work(&dev->wq, 0))
1233 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1234 }
1235 
1236 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1237 {
1238 	u32 intdata;
1239 
1240 	if (urb->actual_length != 4) {
1241 		netdev_warn(dev->net,
1242 			    "unexpected urb length %d", urb->actual_length);
1243 		return;
1244 	}
1245 
1246 	memcpy(&intdata, urb->transfer_buffer, 4);
1247 	le32_to_cpus(&intdata);
1248 
1249 	if (intdata & INT_ENP_PHY_INT) {
1250 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1251 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1252 
1253 		if (dev->domain_data.phyirq > 0)
1254 			generic_handle_irq(dev->domain_data.phyirq);
1255 	} else
1256 		netdev_warn(dev->net,
1257 			    "unexpected interrupt: 0x%08x\n", intdata);
1258 }
1259 
/* ethtool: report the EEPROM size (maximum supported), in bytes */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1264 
1265 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1266 				      struct ethtool_eeprom *ee, u8 *data)
1267 {
1268 	struct lan78xx_net *dev = netdev_priv(netdev);
1269 	int ret;
1270 
1271 	ret = usb_autopm_get_interface(dev->intf);
1272 	if (ret)
1273 		return ret;
1274 
1275 	ee->magic = LAN78XX_EEPROM_MAGIC;
1276 
1277 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1278 
1279 	usb_autopm_put_interface(dev->intf);
1280 
1281 	return ret;
1282 }
1283 
1284 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1285 				      struct ethtool_eeprom *ee, u8 *data)
1286 {
1287 	struct lan78xx_net *dev = netdev_priv(netdev);
1288 	int ret;
1289 
1290 	ret = usb_autopm_get_interface(dev->intf);
1291 	if (ret)
1292 		return ret;
1293 
1294 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1295 	 * to load data from EEPROM
1296 	 */
1297 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1298 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1299 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1300 		 (ee->offset == 0) &&
1301 		 (ee->len == 512) &&
1302 		 (data[0] == OTP_INDICATOR_1))
1303 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1304 
1305 	usb_autopm_put_interface(dev->intf);
1306 
1307 	return ret;
1308 }
1309 
1310 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1311 				u8 *data)
1312 {
1313 	if (stringset == ETH_SS_STATS)
1314 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1315 }
1316 
1317 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1318 {
1319 	if (sset == ETH_SS_STATS)
1320 		return ARRAY_SIZE(lan78xx_gstrings);
1321 	else
1322 		return -EOPNOTSUPP;
1323 }
1324 
1325 static void lan78xx_get_stats(struct net_device *netdev,
1326 			      struct ethtool_stats *stats, u64 *data)
1327 {
1328 	struct lan78xx_net *dev = netdev_priv(netdev);
1329 
1330 	lan78xx_update_stats(dev);
1331 
1332 	mutex_lock(&dev->stats.access_lock);
1333 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1334 	mutex_unlock(&dev->stats.access_lock);
1335 }
1336 
1337 static void lan78xx_get_wol(struct net_device *netdev,
1338 			    struct ethtool_wolinfo *wol)
1339 {
1340 	struct lan78xx_net *dev = netdev_priv(netdev);
1341 	int ret;
1342 	u32 buf;
1343 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1344 
1345 	if (usb_autopm_get_interface(dev->intf) < 0)
1346 			return;
1347 
1348 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1349 	if (unlikely(ret < 0)) {
1350 		wol->supported = 0;
1351 		wol->wolopts = 0;
1352 	} else {
1353 		if (buf & USB_CFG_RMT_WKP_) {
1354 			wol->supported = WAKE_ALL;
1355 			wol->wolopts = pdata->wol;
1356 		} else {
1357 			wol->supported = 0;
1358 			wol->wolopts = 0;
1359 		}
1360 	}
1361 
1362 	usb_autopm_put_interface(dev->intf);
1363 }
1364 
1365 static int lan78xx_set_wol(struct net_device *netdev,
1366 			   struct ethtool_wolinfo *wol)
1367 {
1368 	struct lan78xx_net *dev = netdev_priv(netdev);
1369 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1370 	int ret;
1371 
1372 	ret = usb_autopm_get_interface(dev->intf);
1373 	if (ret < 0)
1374 		return ret;
1375 
1376 	pdata->wol = 0;
1377 	if (wol->wolopts & WAKE_UCAST)
1378 		pdata->wol |= WAKE_UCAST;
1379 	if (wol->wolopts & WAKE_MCAST)
1380 		pdata->wol |= WAKE_MCAST;
1381 	if (wol->wolopts & WAKE_BCAST)
1382 		pdata->wol |= WAKE_BCAST;
1383 	if (wol->wolopts & WAKE_MAGIC)
1384 		pdata->wol |= WAKE_MAGIC;
1385 	if (wol->wolopts & WAKE_PHY)
1386 		pdata->wol |= WAKE_PHY;
1387 	if (wol->wolopts & WAKE_ARP)
1388 		pdata->wol |= WAKE_ARP;
1389 
1390 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1391 
1392 	phy_ethtool_set_wol(netdev->phydev, wol);
1393 
1394 	usb_autopm_put_interface(dev->intf);
1395 
1396 	return ret;
1397 }
1398 
/* ethtool: report Energy-Efficient-Ethernet state.  PHY-side state comes
 * from phylib; MAC-side enable and the TX LPI delay come from MAC_CR and
 * EEE_TX_LPI_REQ_DLY.  Returns 0 or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* fill advertised/lp_advertised from the PHY first */
	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1436 
/* ethtool: enable/disable Energy-Efficient-Ethernet on MAC and PHY.
 * When enabling, the TX LPI request delay is programmed from
 * edata->tx_lpi_timer (microseconds).  Always returns 0.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		/* MAC-side enable, then PHY advertisement, then LPI delay */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1466 
1467 static u32 lan78xx_get_link(struct net_device *net)
1468 {
1469 	phy_read_status(net->phydev);
1470 
1471 	return net->phydev->link;
1472 }
1473 
1474 static void lan78xx_get_drvinfo(struct net_device *net,
1475 				struct ethtool_drvinfo *info)
1476 {
1477 	struct lan78xx_net *dev = netdev_priv(net);
1478 
1479 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1480 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1481 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1482 }
1483 
1484 static u32 lan78xx_get_msglevel(struct net_device *net)
1485 {
1486 	struct lan78xx_net *dev = netdev_priv(net);
1487 
1488 	return dev->msg_enable;
1489 }
1490 
1491 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1492 {
1493 	struct lan78xx_net *dev = netdev_priv(net);
1494 
1495 	dev->msg_enable = level;
1496 }
1497 
1498 static int lan78xx_get_link_ksettings(struct net_device *net,
1499 				      struct ethtool_link_ksettings *cmd)
1500 {
1501 	struct lan78xx_net *dev = netdev_priv(net);
1502 	struct phy_device *phydev = net->phydev;
1503 	int ret;
1504 
1505 	ret = usb_autopm_get_interface(dev->intf);
1506 	if (ret < 0)
1507 		return ret;
1508 
1509 	phy_ethtool_ksettings_get(phydev, cmd);
1510 
1511 	usb_autopm_put_interface(dev->intf);
1512 
1513 	return ret;
1514 }
1515 
1516 static int lan78xx_set_link_ksettings(struct net_device *net,
1517 				      const struct ethtool_link_ksettings *cmd)
1518 {
1519 	struct lan78xx_net *dev = netdev_priv(net);
1520 	struct phy_device *phydev = net->phydev;
1521 	int ret = 0;
1522 	int temp;
1523 
1524 	ret = usb_autopm_get_interface(dev->intf);
1525 	if (ret < 0)
1526 		return ret;
1527 
1528 	/* change speed & duplex */
1529 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1530 
1531 	if (!cmd->base.autoneg) {
1532 		/* force link down */
1533 		temp = phy_read(phydev, MII_BMCR);
1534 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1535 		mdelay(1);
1536 		phy_write(phydev, MII_BMCR, temp);
1537 	}
1538 
1539 	usb_autopm_put_interface(dev->intf);
1540 
1541 	return ret;
1542 }
1543 
1544 static void lan78xx_get_pause(struct net_device *net,
1545 			      struct ethtool_pauseparam *pause)
1546 {
1547 	struct lan78xx_net *dev = netdev_priv(net);
1548 	struct phy_device *phydev = net->phydev;
1549 	struct ethtool_link_ksettings ecmd;
1550 
1551 	phy_ethtool_ksettings_get(phydev, &ecmd);
1552 
1553 	pause->autoneg = dev->fc_autoneg;
1554 
1555 	if (dev->fc_request_control & FLOW_CTRL_TX)
1556 		pause->tx_pause = 1;
1557 
1558 	if (dev->fc_request_control & FLOW_CTRL_RX)
1559 		pause->rx_pause = 1;
1560 }
1561 
1562 static int lan78xx_set_pause(struct net_device *net,
1563 			     struct ethtool_pauseparam *pause)
1564 {
1565 	struct lan78xx_net *dev = netdev_priv(net);
1566 	struct phy_device *phydev = net->phydev;
1567 	struct ethtool_link_ksettings ecmd;
1568 	int ret;
1569 
1570 	phy_ethtool_ksettings_get(phydev, &ecmd);
1571 
1572 	if (pause->autoneg && !ecmd.base.autoneg) {
1573 		ret = -EINVAL;
1574 		goto exit;
1575 	}
1576 
1577 	dev->fc_request_control = 0;
1578 	if (pause->rx_pause)
1579 		dev->fc_request_control |= FLOW_CTRL_RX;
1580 
1581 	if (pause->tx_pause)
1582 		dev->fc_request_control |= FLOW_CTRL_TX;
1583 
1584 	if (ecmd.base.autoneg) {
1585 		u32 mii_adv;
1586 		u32 advertising;
1587 
1588 		ethtool_convert_link_mode_to_legacy_u32(
1589 			&advertising, ecmd.link_modes.advertising);
1590 
1591 		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1592 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1593 		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1594 
1595 		ethtool_convert_legacy_u32_to_link_mode(
1596 			ecmd.link_modes.advertising, advertising);
1597 
1598 		phy_ethtool_ksettings_set(phydev, &ecmd);
1599 	}
1600 
1601 	dev->fc_autoneg = pause->autoneg;
1602 
1603 	ret = 0;
1604 exit:
1605 	return ret;
1606 }
1607 
/* ethtool operations table; link settings are delegated to phylib */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1629 
1630 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1631 {
1632 	if (!netif_running(netdev))
1633 		return -EINVAL;
1634 
1635 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1636 }
1637 
1638 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1639 {
1640 	u32 addr_lo, addr_hi;
1641 	int ret;
1642 	u8 addr[6];
1643 
1644 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1645 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1646 
1647 	addr[0] = addr_lo & 0xFF;
1648 	addr[1] = (addr_lo >> 8) & 0xFF;
1649 	addr[2] = (addr_lo >> 16) & 0xFF;
1650 	addr[3] = (addr_lo >> 24) & 0xFF;
1651 	addr[4] = addr_hi & 0xFF;
1652 	addr[5] = (addr_hi >> 8) & 0xFF;
1653 
1654 	if (!is_valid_ether_addr(addr)) {
1655 		/* reading mac address from EEPROM or OTP */
1656 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1657 					 addr) == 0) ||
1658 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1659 				      addr) == 0)) {
1660 			if (is_valid_ether_addr(addr)) {
1661 				/* eeprom values are valid so use them */
1662 				netif_dbg(dev, ifup, dev->net,
1663 					  "MAC address read from EEPROM");
1664 			} else {
1665 				/* generate random MAC */
1666 				random_ether_addr(addr);
1667 				netif_dbg(dev, ifup, dev->net,
1668 					  "MAC address set to random addr");
1669 			}
1670 
1671 			addr_lo = addr[0] | (addr[1] << 8) |
1672 				  (addr[2] << 16) | (addr[3] << 24);
1673 			addr_hi = addr[4] | (addr[5] << 8);
1674 
1675 			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1676 			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1677 		} else {
1678 			/* generate random MAC */
1679 			random_ether_addr(addr);
1680 			netif_dbg(dev, ifup, dev->net,
1681 				  "MAC address set to random addr");
1682 		}
1683 	}
1684 
1685 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1686 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1687 
1688 	ether_addr_copy(dev->net->dev_addr, addr);
1689 }
1690 
1691 /* MDIO read and write wrappers for phylib */
/* mii_bus .read callback: read PHY register idx of phy_id through the
 * chip's MII controller.  Returns the 16-bit register value, or a
 * negative errno.  Serialized by phy_mutex; device is kept resumed for
 * the duration of the access.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the controller to complete the transaction */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* low 16 bits hold the register contents */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1727 
/* mii_bus .write callback: write regval to PHY register idx of phy_id.
 * Serialized by phy_mutex; device is kept resumed for the access.
 * NOTE(review): always returns 0, even when the MII controller stays
 * busy or a register write fails - errors are silently dropped.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* data must be staged before the access command */
	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
1762 
1763 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1764 {
1765 	int ret;
1766 
1767 	dev->mdiobus = mdiobus_alloc();
1768 	if (!dev->mdiobus) {
1769 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1770 		return -ENOMEM;
1771 	}
1772 
1773 	dev->mdiobus->priv = (void *)dev;
1774 	dev->mdiobus->read = lan78xx_mdiobus_read;
1775 	dev->mdiobus->write = lan78xx_mdiobus_write;
1776 	dev->mdiobus->name = "lan78xx-mdiobus";
1777 
1778 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1779 		 dev->udev->bus->busnum, dev->udev->devnum);
1780 
1781 	switch (dev->chipid) {
1782 	case ID_REV_CHIP_ID_7800_:
1783 	case ID_REV_CHIP_ID_7850_:
1784 		/* set to internal PHY id */
1785 		dev->mdiobus->phy_mask = ~(1 << 1);
1786 		break;
1787 	case ID_REV_CHIP_ID_7801_:
1788 		/* scan thru PHYAD[2..0] */
1789 		dev->mdiobus->phy_mask = ~(0xFF);
1790 		break;
1791 	}
1792 
1793 	ret = mdiobus_register(dev->mdiobus);
1794 	if (ret) {
1795 		netdev_err(dev->net, "can't register MDIO bus\n");
1796 		goto exit1;
1797 	}
1798 
1799 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1800 	return 0;
1801 exit1:
1802 	mdiobus_free(dev->mdiobus);
1803 	return ret;
1804 }
1805 
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1811 
/* phylib link-change callback.
 * Workaround only: no generic link handling happens here (that is done
 * in lan78xx_link_reset()).
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* bounce the speed: clear speed bits (10M), then 100M */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1843 
1844 static int irq_map(struct irq_domain *d, unsigned int irq,
1845 		   irq_hw_number_t hwirq)
1846 {
1847 	struct irq_domain_data *data = d->host_data;
1848 
1849 	irq_set_chip_data(irq, data);
1850 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1851 	irq_set_noprobe(irq);
1852 
1853 	return 0;
1854 }
1855 
/* irq_domain .unmap: detach handler and chip data from the virq */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1861 
/* irq_domain callbacks for the chip's internal interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1866 
1867 static void lan78xx_irq_mask(struct irq_data *irqd)
1868 {
1869 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1870 
1871 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1872 }
1873 
1874 static void lan78xx_irq_unmask(struct irq_data *irqd)
1875 {
1876 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1877 
1878 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1879 }
1880 
1881 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1882 {
1883 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1884 
1885 	mutex_lock(&data->irq_lock);
1886 }
1887 
/* irqchip .irq_bus_sync_unlock: write the cached enable mask to
 * INT_EP_CTL if it changed, then release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1905 
/* irqchip for the chip's internal interrupt sources; mask changes are
 * cached and flushed over USB in the bus_lock/bus_sync_unlock pair.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1913 
1914 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1915 {
1916 	struct device_node *of_node;
1917 	struct irq_domain *irqdomain;
1918 	unsigned int irqmap = 0;
1919 	u32 buf;
1920 	int ret = 0;
1921 
1922 	of_node = dev->udev->dev.parent->of_node;
1923 
1924 	mutex_init(&dev->domain_data.irq_lock);
1925 
1926 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1927 	dev->domain_data.irqenable = buf;
1928 
1929 	dev->domain_data.irqchip = &lan78xx_irqchip;
1930 	dev->domain_data.irq_handler = handle_simple_irq;
1931 
1932 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1933 					  &chip_domain_ops, &dev->domain_data);
1934 	if (irqdomain) {
1935 		/* create mapping for PHY interrupt */
1936 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1937 		if (!irqmap) {
1938 			irq_domain_remove(irqdomain);
1939 
1940 			irqdomain = NULL;
1941 			ret = -EINVAL;
1942 		}
1943 	} else {
1944 		ret = -EINVAL;
1945 	}
1946 
1947 	dev->domain_data.irqdomain = irqdomain;
1948 	dev->domain_data.phyirq = irqmap;
1949 
1950 	return ret;
1951 }
1952 
1953 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1954 {
1955 	if (dev->domain_data.phyirq > 0) {
1956 		irq_dispose_mapping(dev->domain_data.phyirq);
1957 
1958 		if (dev->domain_data.irqdomain)
1959 			irq_domain_remove(dev->domain_data.irqdomain);
1960 	}
1961 	dev->domain_data.phyirq = 0;
1962 	dev->domain_data.irqdomain = NULL;
1963 }
1964 
/* PHY fixup for an external LAN8835: route the interrupt pin and set up
 * the RGMII TX clock delay on the MAC side.  Returns 1 so phylib treats
 * the fixup as applied.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1988 
/* PHY fixup for an external KSZ9031RNX: program RGMII pad skews so the
 * PHY provides the RX clock delay.  Returns 1 so phylib treats the
 * fixup as applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2005 
2006 static int lan78xx_phy_init(struct lan78xx_net *dev)
2007 {
2008 	int ret;
2009 	u32 mii_adv;
2010 	struct phy_device *phydev;
2011 
2012 	phydev = phy_find_first(dev->mdiobus);
2013 	if (!phydev) {
2014 		netdev_err(dev->net, "no PHY found\n");
2015 		return -EIO;
2016 	}
2017 
2018 	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2019 	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2020 		phydev->is_internal = true;
2021 		dev->interface = PHY_INTERFACE_MODE_GMII;
2022 
2023 	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2024 		if (!phydev->drv) {
2025 			netdev_err(dev->net, "no PHY driver found\n");
2026 			return -EIO;
2027 		}
2028 
2029 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2030 
2031 		/* external PHY fixup for KSZ9031RNX */
2032 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2033 						 ksz9031rnx_fixup);
2034 		if (ret < 0) {
2035 			netdev_err(dev->net, "fail to register fixup\n");
2036 			return ret;
2037 		}
2038 		/* external PHY fixup for LAN8835 */
2039 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2040 						 lan8835_fixup);
2041 		if (ret < 0) {
2042 			netdev_err(dev->net, "fail to register fixup\n");
2043 			return ret;
2044 		}
2045 		/* add more external PHY fixup here if needed */
2046 
2047 		phydev->is_internal = false;
2048 	} else {
2049 		netdev_err(dev->net, "unknown ID found\n");
2050 		ret = -EIO;
2051 		goto error;
2052 	}
2053 
2054 	/* if phyirq is not set, use polling mode in phylib */
2055 	if (dev->domain_data.phyirq > 0)
2056 		phydev->irq = dev->domain_data.phyirq;
2057 	else
2058 		phydev->irq = 0;
2059 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2060 
2061 	/* set to AUTOMDIX */
2062 	phydev->mdix = ETH_TP_MDI_AUTO;
2063 
2064 	ret = phy_connect_direct(dev->net, phydev,
2065 				 lan78xx_link_status_change,
2066 				 dev->interface);
2067 	if (ret) {
2068 		netdev_err(dev->net, "can't attach PHY to %s\n",
2069 			   dev->mdiobus->id);
2070 		return -EIO;
2071 	}
2072 
2073 	/* MAC doesn't support 1000T Half */
2074 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
2075 
2076 	/* support both flow controls */
2077 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2078 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2079 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2080 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2081 
2082 	genphy_config_aneg(phydev);
2083 
2084 	dev->fc_autoneg = phydev->autoneg;
2085 
2086 	return 0;
2087 
2088 error:
2089 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2090 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2091 
2092 	return ret;
2093 }
2094 
/* Program the maximum RX frame size (payload `size` plus 4 bytes FCS).
 * The receiver is temporarily disabled around the change if it was
 * running, since MAC_RX_MAX_SIZE must not change while RX is enabled.
 * Always returns 0.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* re-enable the receiver if we stopped it above */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2123 
/* Unlink all URBs on queue q that are not already being unlinked.
 * Returns the number of URBs successfully submitted for unlinking.
 * The queue lock is dropped around usb_unlink_urb() (which may complete
 * synchronously), so the walk restarts from the head each iteration.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2168 
2169 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2170 {
2171 	struct lan78xx_net *dev = netdev_priv(netdev);
2172 	int ll_mtu = new_mtu + netdev->hard_header_len;
2173 	int old_hard_mtu = dev->hard_mtu;
2174 	int old_rx_urb_size = dev->rx_urb_size;
2175 	int ret;
2176 
2177 	/* no second zero-length packet read wanted after mtu-sized packets */
2178 	if ((ll_mtu % dev->maxpacket) == 0)
2179 		return -EDOM;
2180 
2181 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2182 
2183 	netdev->mtu = new_mtu;
2184 
2185 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2186 	if (dev->rx_urb_size == old_hard_mtu) {
2187 		dev->rx_urb_size = dev->hard_mtu;
2188 		if (dev->rx_urb_size > old_rx_urb_size) {
2189 			if (netif_running(dev->net)) {
2190 				unlink_urbs(dev, &dev->rxq);
2191 				tasklet_schedule(&dev->bh);
2192 			}
2193 		}
2194 	}
2195 
2196 	return 0;
2197 }
2198 
2199 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2200 {
2201 	struct lan78xx_net *dev = netdev_priv(netdev);
2202 	struct sockaddr *addr = p;
2203 	u32 addr_lo, addr_hi;
2204 	int ret;
2205 
2206 	if (netif_running(netdev))
2207 		return -EBUSY;
2208 
2209 	if (!is_valid_ether_addr(addr->sa_data))
2210 		return -EADDRNOTAVAIL;
2211 
2212 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2213 
2214 	addr_lo = netdev->dev_addr[0] |
2215 		  netdev->dev_addr[1] << 8 |
2216 		  netdev->dev_addr[2] << 16 |
2217 		  netdev->dev_addr[3] << 24;
2218 	addr_hi = netdev->dev_addr[4] |
2219 		  netdev->dev_addr[5] << 8;
2220 
2221 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2222 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2223 
2224 	return 0;
2225 }
2226 
2227 /* Enable or disable Rx checksum offload engine */
2228 static int lan78xx_set_features(struct net_device *netdev,
2229 				netdev_features_t features)
2230 {
2231 	struct lan78xx_net *dev = netdev_priv(netdev);
2232 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2233 	unsigned long flags;
2234 	int ret;
2235 
2236 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2237 
2238 	if (features & NETIF_F_RXCSUM) {
2239 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2240 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2241 	} else {
2242 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2243 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2244 	}
2245 
2246 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2247 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2248 	else
2249 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2250 
2251 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2252 
2253 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2254 
2255 	return 0;
2256 }
2257 
2258 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2259 {
2260 	struct lan78xx_priv *pdata =
2261 			container_of(param, struct lan78xx_priv, set_vlan);
2262 	struct lan78xx_net *dev = pdata->dev;
2263 
2264 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2265 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2266 }
2267 
2268 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2269 				   __be16 proto, u16 vid)
2270 {
2271 	struct lan78xx_net *dev = netdev_priv(netdev);
2272 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2273 	u16 vid_bit_index;
2274 	u16 vid_dword_index;
2275 
2276 	vid_dword_index = (vid >> 5) & 0x7F;
2277 	vid_bit_index = vid & 0x1F;
2278 
2279 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2280 
2281 	/* defer register writes to a sleepable context */
2282 	schedule_work(&pdata->set_vlan);
2283 
2284 	return 0;
2285 }
2286 
2287 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2288 				    __be16 proto, u16 vid)
2289 {
2290 	struct lan78xx_net *dev = netdev_priv(netdev);
2291 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2292 	u16 vid_bit_index;
2293 	u16 vid_dword_index;
2294 
2295 	vid_dword_index = (vid >> 5) & 0x7F;
2296 	vid_bit_index = vid & 0x1F;
2297 
2298 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2299 
2300 	/* defer register writes to a sleepable context */
2301 	schedule_work(&pdata->set_vlan);
2302 
2303 	return 0;
2304 }
2305 
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1 and a 24-byte descriptor is found via
 * the header at EEPROM offset 0x3F (OTP as a fallback), the six
 * BELT/INACTIVE register values are loaded from it; otherwise all six
 * registers are programmed to zero.  Read errors in the raw fetch
 * abort without writing the registers at all.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			/* temp[0] is the descriptor length; only a
			 * 24-byte (6 register) payload is accepted.
			 * temp[1] appears to be a word offset into the
			 * EEPROM -- NOTE(review): confirm against the
			 * datasheet.
			 */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2344 
/* Perform a lite reset of the chip, then reprogram the MAC, USB,
 * FIFO, flow-control and receive-filter registers to their operating
 * defaults, reset the PHY, and finally enable the TX/RX paths.
 *
 * Returns 0 on success, or -EIO if the lite-reset or PHY-reset bit
 * fails to self-clear within roughly one second.  Intermediate
 * register-access results are deliberately not checked here; the
 * statement order matters (each step assumes the previous one
 * completed) so treat this sequence as read-only unless verified
 * against the datasheet.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger a lite reset and poll for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and queue depths scale with the enumerated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts; disable flow control until link-up */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the transmit path: MAC first, then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the receive path: MAC first, then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2482 
2483 static void lan78xx_init_stats(struct lan78xx_net *dev)
2484 {
2485 	u32 *p;
2486 	int i;
2487 
2488 	/* initialize for stats update
2489 	 * some counters are 20bits and some are 32bits
2490 	 */
2491 	p = (u32 *)&dev->stats.rollover_max;
2492 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2493 		p[i] = 0xFFFFF;
2494 
2495 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2496 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2497 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2498 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2499 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2500 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2501 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2502 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2503 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2504 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2505 
2506 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2507 }
2508 
/* ndo_open: resume the device via runtime PM, start the PHY, submit
 * the interrupt URB used for link-change notification, initialise
 * statistics, and schedule a deferred link reset.
 *
 * Returns 0 on success or a negative errno (PM resume or interrupt
 * URB submission failure).
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* take a PM usage reference so the device is awake for setup */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* have the kevent worker re-evaluate the link state */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* drop the PM reference taken above; ret still carries the
	 * success/failure of the steps before the jump here
	 */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2547 
2548 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2549 {
2550 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2551 	DECLARE_WAITQUEUE(wait, current);
2552 	int temp;
2553 
2554 	/* ensure there are no more active urbs */
2555 	add_wait_queue(&unlink_wakeup, &wait);
2556 	set_current_state(TASK_UNINTERRUPTIBLE);
2557 	dev->wait = &unlink_wakeup;
2558 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2559 
2560 	/* maybe wait for deletions to finish. */
2561 	while (!skb_queue_empty(&dev->rxq) &&
2562 	       !skb_queue_empty(&dev->txq) &&
2563 	       !skb_queue_empty(&dev->done)) {
2564 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2565 		set_current_state(TASK_UNINTERRUPTIBLE);
2566 		netif_dbg(dev, ifdown, dev->net,
2567 			  "waited for %d urb completions\n", temp);
2568 	}
2569 	set_current_state(TASK_RUNNING);
2570 	dev->wait = NULL;
2571 	remove_wait_queue(&unlink_wakeup, &wait);
2572 }
2573 
/* ndo_stop: tear down the running interface -- stop the statistics
 * timer and PHY, quiesce the queues, kill all in-flight URBs and the
 * interrupt URB, neutralise deferred work, and release the runtime-PM
 * reference held while the device was open.  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop the periodic statistics refresh first */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out all in-flight bulk URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balances the autopm_get in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2610 
/* Flatten any paged skb data into one linear buffer so the whole
 * frame can go out as a single bulk transfer.  Thin wrapper around
 * skb_linearize(); returns its result (0 on success, negative errno
 * on allocation failure).
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2615 
2616 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2617 				       struct sk_buff *skb, gfp_t flags)
2618 {
2619 	u32 tx_cmd_a, tx_cmd_b;
2620 
2621 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2622 		dev_kfree_skb_any(skb);
2623 		return NULL;
2624 	}
2625 
2626 	if (lan78xx_linearize(skb) < 0)
2627 		return NULL;
2628 
2629 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2630 
2631 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2632 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2633 
2634 	tx_cmd_b = 0;
2635 	if (skb_is_gso(skb)) {
2636 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2637 
2638 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2639 
2640 		tx_cmd_a |= TX_CMD_A_LSO_;
2641 	}
2642 
2643 	if (skb_vlan_tag_present(skb)) {
2644 		tx_cmd_a |= TX_CMD_A_IVTG_;
2645 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2646 	}
2647 
2648 	skb_push(skb, 4);
2649 	cpu_to_le32s(&tx_cmd_b);
2650 	memcpy(skb->data, &tx_cmd_b, 4);
2651 
2652 	skb_push(skb, 4);
2653 	cpu_to_le32s(&tx_cmd_a);
2654 	memcpy(skb->data, &tx_cmd_a, 4);
2655 
2656 	return skb;
2657 }
2658 
/* Move @skb from @list to dev->done and mark it with @state, then
 * schedule the bottom-half tasklet when the done queue transitions
 * from empty to non-empty.  Returns the skb's previous state so a
 * completion handler can detect a racing unlink.
 *
 * Locking is hand-over-hand: interrupts are disabled by the irqsave
 * on list->lock and stay disabled until the irqrestore on
 * dev->done.lock -- the plain lock/unlock calls in between are
 * intentional, not a bug.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2681 
/* Bulk-out URB completion handler (runs in interrupt context).
 * Accounts TX statistics, reacts to the USB error code, drops the
 * async PM reference taken at submit time, and hands the skb to the
 * bottom half via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* link-level problems: stop feeding the endpoint */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* balances usb_autopm_get_interface_async() from the submit path */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2720 
2721 static void lan78xx_queue_skb(struct sk_buff_head *list,
2722 			      struct sk_buff *newsk, enum skb_state state)
2723 {
2724 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2725 
2726 	__skb_queue_tail(list, newsk);
2727 	entry->state = state;
2728 }
2729 
/* ndo_start_xmit: timestamp the frame, prepend the TX command words,
 * queue it on txq_pend and kick the bottom half, which batches
 * pending frames into bulk-out URBs.  Frames that fail preparation
 * are counted as errors/drops (lan78xx_tx_prep consumes them).
 * Always returns NETDEV_TX_OK.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	/* let the bottom half build and submit the bulk-out URB */
	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2759 
/* Scan the interface's altsettings for the endpoints this driver
 * needs: a bulk-in and a bulk-out endpoint (required) and optionally
 * an interrupt-in endpoint for status/link events.  Stores the
 * resulting pipes in @dev and the interrupt endpoint in dev->ep_intr.
 *
 * Returns 0 on success, -EINVAL if no altsetting supplies both bulk
 * endpoints.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN-direction interrupt eps count */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* ignore iso/control endpoints */
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2818 
2819 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2820 {
2821 	struct lan78xx_priv *pdata = NULL;
2822 	int ret;
2823 	int i;
2824 
2825 	ret = lan78xx_get_endpoints(dev, intf);
2826 
2827 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2828 
2829 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2830 	if (!pdata) {
2831 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2832 		return -ENOMEM;
2833 	}
2834 
2835 	pdata->dev = dev;
2836 
2837 	spin_lock_init(&pdata->rfe_ctl_lock);
2838 	mutex_init(&pdata->dataport_mutex);
2839 
2840 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2841 
2842 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2843 		pdata->vlan_table[i] = 0;
2844 
2845 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2846 
2847 	dev->net->features = 0;
2848 
2849 	if (DEFAULT_TX_CSUM_ENABLE)
2850 		dev->net->features |= NETIF_F_HW_CSUM;
2851 
2852 	if (DEFAULT_RX_CSUM_ENABLE)
2853 		dev->net->features |= NETIF_F_RXCSUM;
2854 
2855 	if (DEFAULT_TSO_CSUM_ENABLE)
2856 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2857 
2858 	dev->net->hw_features = dev->net->features;
2859 
2860 	ret = lan78xx_setup_irq_domain(dev);
2861 	if (ret < 0) {
2862 		netdev_warn(dev->net,
2863 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2864 		goto out1;
2865 	}
2866 
2867 	dev->net->hard_header_len += TX_OVERHEAD;
2868 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2869 
2870 	/* Init all registers */
2871 	ret = lan78xx_reset(dev);
2872 	if (ret) {
2873 		netdev_warn(dev->net, "Registers INIT FAILED....");
2874 		goto out2;
2875 	}
2876 
2877 	ret = lan78xx_mdio_init(dev);
2878 	if (ret) {
2879 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
2880 		goto out2;
2881 	}
2882 
2883 	dev->net->flags |= IFF_MULTICAST;
2884 
2885 	pdata->wol = WAKE_MAGIC;
2886 
2887 	return ret;
2888 
2889 out2:
2890 	lan78xx_remove_irq_domain(dev);
2891 
2892 out1:
2893 	netdev_warn(dev->net, "Bind routine FAILED");
2894 	cancel_work_sync(&pdata->set_multicast);
2895 	cancel_work_sync(&pdata->set_vlan);
2896 	kfree(pdata);
2897 	return ret;
2898 }
2899 
2900 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2901 {
2902 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2903 
2904 	lan78xx_remove_irq_domain(dev);
2905 
2906 	lan78xx_remove_mdio(dev);
2907 
2908 	if (pdata) {
2909 		cancel_work_sync(&pdata->set_multicast);
2910 		cancel_work_sync(&pdata->set_vlan);
2911 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2912 		kfree(pdata);
2913 		pdata = NULL;
2914 		dev->data[0] = 0;
2915 	}
2916 }
2917 
2918 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2919 				    struct sk_buff *skb,
2920 				    u32 rx_cmd_a, u32 rx_cmd_b)
2921 {
2922 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2923 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2924 		skb->ip_summed = CHECKSUM_NONE;
2925 	} else {
2926 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2927 		skb->ip_summed = CHECKSUM_COMPLETE;
2928 	}
2929 }
2930 
/* Hand a fully-parsed RX frame to the network stack.  While RX is
 * paused (EVENT_RX_PAUSED) the frame is parked on rxq_pause instead.
 * Updates RX statistics and clears the driver's cb scratch area
 * before the skb leaves the driver.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb still holds our skb_data; wipe it before handing off */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* hardware RX timestamping may consume the skb here */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2957 
/* Parse one bulk-in buffer containing a batch of RX frames.
 *
 * Each frame is preceded by three little-endian command words
 * (rx_cmd_a, rx_cmd_b, rx_cmd_c) and frames are padded so the next
 * header starts on a 4-byte boundary.  Every frame except the last is
 * cloned and passed up individually; the last frame is left trimmed
 * in @skb for the caller (rx_process) to hand up.
 *
 * Returns 1 on success (caller checks skb->len), 0 when the buffer is
 * shorter than a frame header or a clone allocation fails (caller
 * counts an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data; narrow the clone's
			 * data/len/tail to cover just this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3029 
3030 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3031 {
3032 	if (!lan78xx_rx(dev, skb)) {
3033 		dev->net->stats.rx_errors++;
3034 		goto done;
3035 	}
3036 
3037 	if (skb->len) {
3038 		lan78xx_skb_return(dev, skb);
3039 		return;
3040 	}
3041 
3042 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3043 	dev->net->stats.rx_errors++;
3044 done:
3045 	skb_queue_tail(&dev->done, skb);
3046 }
3047 
3048 static void rx_complete(struct urb *urb);
3049 
/* Allocate an rx_urb_size skb, attach it to @urb and submit the URB
 * on the bulk-in pipe.  On any failure both the skb and the urb are
 * freed here.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOLINK when
 * the device is stopped/unreachable, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* track the urb/device in the skb's cb scratch area */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is up, not halted or asleep */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			/* transient failure: let the bottom half retry */
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3109 
/* Bulk-in URB completion handler (interrupt context).
 *
 * Classifies the URB status into an skb_state, hands the skb to the
 * bottom half via defer_bh(), and resubmits the URB immediately when
 * the receive path is still healthy.  For terminal errors the urb
 * pointer is stashed back in entry->urb so the bottom-half cleanup
 * frees it instead of this handler.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and recycle the buffer */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* urb is still ours only on the healthy paths above */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3179 
/* Bottom-half TX path: coalesce packets queued on dev->txq_pend into a
 * single bulk-out URB and submit it.
 *
 * Non-GSO skbs are copied back-to-back into one freshly allocated skb,
 * each placed on a 32-bit aligned offset, up to MAX_SINGLE_PACKET_SIZE
 * bytes total.  A GSO skb is never coalesced: it is sent on its own,
 * and if ordinary packets precede it in the queue those are flushed
 * first (the GSO skb is picked up on a later tasklet pass).
 *
 * Runs in tasklet (softirq) context - hence GFP_ATOMIC - and serializes
 * submission against suspend/completion via dev->txq.lock.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* First pass: walk the pending queue without dequeuing to decide
	 * how many packets to merge and how big the merged skb must be.
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			/* length excludes the TX command header bytes */
			length = skb->len - TX_OVERHEAD;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each packet starts on a 32-bit boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* Second pass: dequeue the counted packets and pack them in. */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: hand recovery to the delayed work */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
		/* NOTE: the goto targets above jump into this if-body so
		 * they share the accounting below; skb is NULL here only
		 * when alloc_skb() failed.
		 */
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3304 
3305 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3306 {
3307 	struct urb *urb;
3308 	int i;
3309 
3310 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3311 		for (i = 0; i < 10; i++) {
3312 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3313 				break;
3314 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3315 			if (urb)
3316 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3317 					return;
3318 		}
3319 
3320 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3321 			tasklet_schedule(&dev->bh);
3322 	}
3323 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3324 		netif_wake_queue(dev->net);
3325 }
3326 
3327 static void lan78xx_bh(unsigned long param)
3328 {
3329 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3330 	struct sk_buff *skb;
3331 	struct skb_data *entry;
3332 
3333 	while ((skb = skb_dequeue(&dev->done))) {
3334 		entry = (struct skb_data *)(skb->cb);
3335 		switch (entry->state) {
3336 		case rx_done:
3337 			entry->state = rx_cleanup;
3338 			rx_process(dev, skb);
3339 			continue;
3340 		case tx_done:
3341 			usb_free_urb(entry->urb);
3342 			dev_kfree_skb(skb);
3343 			continue;
3344 		case rx_cleanup:
3345 			usb_free_urb(entry->urb);
3346 			dev_kfree_skb(skb);
3347 			continue;
3348 		default:
3349 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3350 			return;
3351 		}
3352 	}
3353 
3354 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3355 		/* reset update timer delta */
3356 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3357 			dev->delta = 1;
3358 			mod_timer(&dev->stat_monitor,
3359 				  jiffies + STAT_UPDATE_TIMER);
3360 		}
3361 
3362 		if (!skb_queue_empty(&dev->txq_pend))
3363 			lan78xx_tx_bh(dev);
3364 
3365 		if (!timer_pending(&dev->delay) &&
3366 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3367 			lan78xx_rx_bh(dev);
3368 	}
3369 }
3370 
3371 static void lan78xx_delayedwork(struct work_struct *work)
3372 {
3373 	int status;
3374 	struct lan78xx_net *dev;
3375 
3376 	dev = container_of(work, struct lan78xx_net, wq.work);
3377 
3378 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3379 		unlink_urbs(dev, &dev->txq);
3380 		status = usb_autopm_get_interface(dev->intf);
3381 		if (status < 0)
3382 			goto fail_pipe;
3383 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3384 		usb_autopm_put_interface(dev->intf);
3385 		if (status < 0 &&
3386 		    status != -EPIPE &&
3387 		    status != -ESHUTDOWN) {
3388 			if (netif_msg_tx_err(dev))
3389 fail_pipe:
3390 				netdev_err(dev->net,
3391 					   "can't clear tx halt, status %d\n",
3392 					   status);
3393 		} else {
3394 			clear_bit(EVENT_TX_HALT, &dev->flags);
3395 			if (status != -ESHUTDOWN)
3396 				netif_wake_queue(dev->net);
3397 		}
3398 	}
3399 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3400 		unlink_urbs(dev, &dev->rxq);
3401 		status = usb_autopm_get_interface(dev->intf);
3402 		if (status < 0)
3403 				goto fail_halt;
3404 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3405 		usb_autopm_put_interface(dev->intf);
3406 		if (status < 0 &&
3407 		    status != -EPIPE &&
3408 		    status != -ESHUTDOWN) {
3409 			if (netif_msg_rx_err(dev))
3410 fail_halt:
3411 				netdev_err(dev->net,
3412 					   "can't clear rx halt, status %d\n",
3413 					   status);
3414 		} else {
3415 			clear_bit(EVENT_RX_HALT, &dev->flags);
3416 			tasklet_schedule(&dev->bh);
3417 		}
3418 	}
3419 
3420 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3421 		int ret = 0;
3422 
3423 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3424 		status = usb_autopm_get_interface(dev->intf);
3425 		if (status < 0)
3426 			goto skip_reset;
3427 		if (lan78xx_link_reset(dev) < 0) {
3428 			usb_autopm_put_interface(dev->intf);
3429 skip_reset:
3430 			netdev_info(dev->net, "link reset failed (%d)\n",
3431 				    ret);
3432 		} else {
3433 			usb_autopm_put_interface(dev->intf);
3434 		}
3435 	}
3436 
3437 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3438 		lan78xx_update_stats(dev);
3439 
3440 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3441 
3442 		mod_timer(&dev->stat_monitor,
3443 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3444 
3445 		dev->delta = min((dev->delta * 2), 50);
3446 	}
3447 }
3448 
3449 static void intr_complete(struct urb *urb)
3450 {
3451 	struct lan78xx_net *dev = urb->context;
3452 	int status = urb->status;
3453 
3454 	switch (status) {
3455 	/* success */
3456 	case 0:
3457 		lan78xx_status(dev, urb);
3458 		break;
3459 
3460 	/* software-driven interface shutdown */
3461 	case -ENOENT:			/* urb killed */
3462 	case -ESHUTDOWN:		/* hardware gone */
3463 		netif_dbg(dev, ifdown, dev->net,
3464 			  "intr shutdown, code %d\n", status);
3465 		return;
3466 
3467 	/* NOTE:  not throttling like RX/TX, since this endpoint
3468 	 * already polls infrequently
3469 	 */
3470 	default:
3471 		netdev_dbg(dev->net, "intr status %d\n", status);
3472 		break;
3473 	}
3474 
3475 	if (!netif_running(dev->net))
3476 		return;
3477 
3478 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3479 	status = usb_submit_urb(urb, GFP_ATOMIC);
3480 	if (status != 0)
3481 		netif_err(dev, timer, dev->net,
3482 			  "intr resubmit --> %d\n", status);
3483 }
3484 
/* USB disconnect handler.  The netdev is unregistered before unbinding
 * and before the interrupt URB is killed/freed, so no new I/O can be
 * started while resources are torn down.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* undo the PHY fixups installed for these PHY IDs */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs that were deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3518 
3519 static void lan78xx_tx_timeout(struct net_device *net)
3520 {
3521 	struct lan78xx_net *dev = netdev_priv(net);
3522 
3523 	unlink_urbs(dev, &dev->txq);
3524 	tasklet_schedule(&dev->bh);
3525 }
3526 
/* net_device callbacks; see the corresponding lan78xx_* handlers */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3541 
3542 static void lan78xx_stat_monitor(struct timer_list *t)
3543 {
3544 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3545 
3546 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3547 }
3548 
3549 static int lan78xx_probe(struct usb_interface *intf,
3550 			 const struct usb_device_id *id)
3551 {
3552 	struct lan78xx_net *dev;
3553 	struct net_device *netdev;
3554 	struct usb_device *udev;
3555 	int ret;
3556 	unsigned maxp;
3557 	unsigned period;
3558 	u8 *buf = NULL;
3559 
3560 	udev = interface_to_usbdev(intf);
3561 	udev = usb_get_dev(udev);
3562 
3563 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3564 	if (!netdev) {
3565 		dev_err(&intf->dev, "Error: OOM\n");
3566 		ret = -ENOMEM;
3567 		goto out1;
3568 	}
3569 
3570 	/* netdev_printk() needs this */
3571 	SET_NETDEV_DEV(netdev, &intf->dev);
3572 
3573 	dev = netdev_priv(netdev);
3574 	dev->udev = udev;
3575 	dev->intf = intf;
3576 	dev->net = netdev;
3577 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3578 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3579 
3580 	skb_queue_head_init(&dev->rxq);
3581 	skb_queue_head_init(&dev->txq);
3582 	skb_queue_head_init(&dev->done);
3583 	skb_queue_head_init(&dev->rxq_pause);
3584 	skb_queue_head_init(&dev->txq_pend);
3585 	mutex_init(&dev->phy_mutex);
3586 
3587 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3588 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3589 	init_usb_anchor(&dev->deferred);
3590 
3591 	netdev->netdev_ops = &lan78xx_netdev_ops;
3592 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3593 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3594 
3595 	dev->delta = 1;
3596 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3597 
3598 	mutex_init(&dev->stats.access_lock);
3599 
3600 	ret = lan78xx_bind(dev, intf);
3601 	if (ret < 0)
3602 		goto out2;
3603 	strcpy(netdev->name, "eth%d");
3604 
3605 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3606 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3607 
3608 	/* MTU range: 68 - 9000 */
3609 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3610 
3611 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3612 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3613 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3614 
3615 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3616 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3617 
3618 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3619 					dev->ep_intr->desc.bEndpointAddress &
3620 					USB_ENDPOINT_NUMBER_MASK);
3621 	period = dev->ep_intr->desc.bInterval;
3622 
3623 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3624 	buf = kmalloc(maxp, GFP_KERNEL);
3625 	if (buf) {
3626 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3627 		if (!dev->urb_intr) {
3628 			ret = -ENOMEM;
3629 			kfree(buf);
3630 			goto out3;
3631 		} else {
3632 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3633 					 dev->pipe_intr, buf, maxp,
3634 					 intr_complete, dev, period);
3635 		}
3636 	}
3637 
3638 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3639 
3640 	/* driver requires remote-wakeup capability during autosuspend. */
3641 	intf->needs_remote_wakeup = 1;
3642 
3643 	ret = register_netdev(netdev);
3644 	if (ret != 0) {
3645 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3646 		goto out3;
3647 	}
3648 
3649 	usb_set_intfdata(intf, dev);
3650 
3651 	ret = device_set_wakeup_enable(&udev->dev, true);
3652 
3653 	 /* Default delay of 2sec has more overhead than advantage.
3654 	  * Set to 10sec as default.
3655 	  */
3656 	pm_runtime_set_autosuspend_delay(&udev->dev,
3657 					 DEFAULT_AUTOSUSPEND_DELAY);
3658 
3659 	ret = lan78xx_phy_init(dev);
3660 	if (ret < 0)
3661 		goto out4;
3662 
3663 	return 0;
3664 
3665 out4:
3666 	unregister_netdev(netdev);
3667 out3:
3668 	lan78xx_unbind(dev, intf);
3669 out2:
3670 	free_netdev(netdev);
3671 out1:
3672 	usb_put_dev(udev);
3673 
3674 	return ret;
3675 }
3676 
3677 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3678 {
3679 	const u16 crc16poly = 0x8005;
3680 	int i;
3681 	u16 bit, crc, msb;
3682 	u8 data;
3683 
3684 	crc = 0xFFFF;
3685 	for (i = 0; i < len; i++) {
3686 		data = *buf++;
3687 		for (bit = 0; bit < 8; bit++) {
3688 			msb = crc >> 15;
3689 			crc <<= 1;
3690 
3691 			if (msb ^ (u16)(data & 1)) {
3692 				crc ^= crc16poly;
3693 				crc |= (u16)0x0001U;
3694 			}
3695 			data >>= 1;
3696 		}
3697 	}
3698 
3699 	return crc;
3700 }
3701 
/* Program the wakeup filters and suspend mode from the @wol bitmap
 * (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST / WAKE_UCAST /
 * WAKE_ARP), then re-enable the receiver so wake frames can be matched.
 *
 * NOTE(review): the return values of the individual register accesses
 * are assigned to ret but never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* prefixes matched by the wakeup-frame filters below */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while reprogramming the wakeup logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wakeup enables and any latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* reset all wakeup-frame filter slots */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12,13: the EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver for wakeup-frame detection */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3844 
/* USB suspend callback (system sleep and runtime/auto suspend).
 *
 * On the first suspend: refuse autosuspend with -EBUSY while TX work is
 * pending; otherwise stop the MAC, tear down all URBs and mark the
 * device asleep.  Then arm the wakeup sources: "good frame" (RFE)
 * wakeup for autosuspend, or the user-configured WoL settings
 * (pdata->wol) for system suspend.
 *
 * NOTE(review): the register-access return codes are stored in ret but
 * not checked on the programming paths below.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* statistics are not updated while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wakeup enables and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear WUPS status (same pattern as
			 * lan78xx_set_suspend())
			 */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX - presumably required for the
			 * wakeup logic to see frames; mirrors
			 * lan78xx_set_suspend()
			 */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: apply user WoL configuration */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3940 
/* USB resume callback: restart the statistics timer and interrupt URB,
 * resubmit TX URBs that were anchored on dev->deferred while asleep,
 * clear all wakeup enable/status registers and re-enable the
 * transmitter.
 *
 * NOTE(review): intermediate return codes are collected in ret but the
 * function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs deferred by lan78xx_tx_bh() while the
		 * device was asleep
		 */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* balance the get taken at submit time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable wakeup sources and clear latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4007 
4008 static int lan78xx_reset_resume(struct usb_interface *intf)
4009 {
4010 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4011 
4012 	lan78xx_reset(dev);
4013 
4014 	phy_start(dev->net->phydev);
4015 
4016 	return lan78xx_resume(intf);
4017 }
4018 
/* USB vendor/product IDs of supported devices */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
4035 
/* USB driver glue: autosuspend is supported and hub-initiated LPM is
 * disabled
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4047 
/* module registration and metadata */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
4053