xref: /openbmc/linux/drivers/net/usb/lan78xx.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
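/* Editor's illustration (not part of the original source): FCT_FLOW
 * thresholds are programmed in 512-byte units. With the SuperSpeed values
 * above, FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS)
 *   = ((9216 + 511) / 512) | (((4096 + 511) / 512) << 8)
 *   = 0x12 | (0x08 << 8) = 0x0812.
 */
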
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
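/* Editorial example: on SuperSpeed, tx_urb_size is TX_SS_URB_SIZE (32768),
 * so LAN78XX_TSO_SIZE = 32768 - (TX_CMD_LEN + ETH_HLEN) = 32768 - 22
 * = 32746 bytes of payload per URB.
 */
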
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
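/* Editorial example: for the default Ethernet MTU of 1500,
 * RX_MAX_FRAME_LEN(1500) = 1500 + ETH_HLEN (14) + VLAN_HLEN (4) = 1518 bytes.
 */
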
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (msec) */
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistics update interval (msec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* interrupt status bits reported via the interrupt endpoint */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
176 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
177 	"RX FCS Errors",
178 	"RX Alignment Errors",
179 	"Rx Fragment Errors",
180 	"RX Jabber Errors",
181 	"RX Undersize Frame Errors",
182 	"RX Oversize Frame Errors",
183 	"RX Dropped Frames",
184 	"RX Unicast Byte Count",
185 	"RX Broadcast Byte Count",
186 	"RX Multicast Byte Count",
187 	"RX Unicast Frames",
188 	"RX Broadcast Frames",
189 	"RX Multicast Frames",
190 	"RX Pause Frames",
191 	"RX 64 Byte Frames",
192 	"RX 65 - 127 Byte Frames",
193 	"RX 128 - 255 Byte Frames",
194 	"RX 256 - 511 Bytes Frames",
195 	"RX 512 - 1023 Byte Frames",
196 	"RX 1024 - 1518 Byte Frames",
197 	"RX Greater 1518 Byte Frames",
198 	"EEE RX LPI Transitions",
199 	"EEE RX LPI Time",
200 	"TX FCS Errors",
201 	"TX Excess Deferral Errors",
202 	"TX Carrier Errors",
203 	"TX Bad Byte Count",
204 	"TX Single Collisions",
205 	"TX Multiple Collisions",
206 	"TX Excessive Collision",
207 	"TX Late Collisions",
208 	"TX Unicast Byte Count",
209 	"TX Broadcast Byte Count",
210 	"TX Multicast Byte Count",
211 	"TX Unicast Frames",
212 	"TX Broadcast Frames",
213 	"TX Multicast Frames",
214 	"TX Pause Frames",
215 	"TX 64 Byte Frames",
216 	"TX 65 - 127 Byte Frames",
217 	"TX 128 - 255 Byte Frames",
218 	"TX 256 - 511 Bytes Frames",
219 	"TX 512 - 1023 Byte Frames",
220 	"TX 1024 - 1518 Byte Frames",
221 	"TX Greater 1518 Byte Frames",
222 	"EEE TX LPI Transitions",
223 	"EEE TX LPI Time",
224 };
225 
226 struct lan78xx_statstage {
227 	u32 rx_fcs_errors;
228 	u32 rx_alignment_errors;
229 	u32 rx_fragment_errors;
230 	u32 rx_jabber_errors;
231 	u32 rx_undersize_frame_errors;
232 	u32 rx_oversize_frame_errors;
233 	u32 rx_dropped_frames;
234 	u32 rx_unicast_byte_count;
235 	u32 rx_broadcast_byte_count;
236 	u32 rx_multicast_byte_count;
237 	u32 rx_unicast_frames;
238 	u32 rx_broadcast_frames;
239 	u32 rx_multicast_frames;
240 	u32 rx_pause_frames;
241 	u32 rx_64_byte_frames;
242 	u32 rx_65_127_byte_frames;
243 	u32 rx_128_255_byte_frames;
244 	u32 rx_256_511_bytes_frames;
245 	u32 rx_512_1023_byte_frames;
246 	u32 rx_1024_1518_byte_frames;
247 	u32 rx_greater_1518_byte_frames;
248 	u32 eee_rx_lpi_transitions;
249 	u32 eee_rx_lpi_time;
250 	u32 tx_fcs_errors;
251 	u32 tx_excess_deferral_errors;
252 	u32 tx_carrier_errors;
253 	u32 tx_bad_byte_count;
254 	u32 tx_single_collisions;
255 	u32 tx_multiple_collisions;
256 	u32 tx_excessive_collision;
257 	u32 tx_late_collisions;
258 	u32 tx_unicast_byte_count;
259 	u32 tx_broadcast_byte_count;
260 	u32 tx_multicast_byte_count;
261 	u32 tx_unicast_frames;
262 	u32 tx_broadcast_frames;
263 	u32 tx_multicast_frames;
264 	u32 tx_pause_frames;
265 	u32 tx_64_byte_frames;
266 	u32 tx_65_127_byte_frames;
267 	u32 tx_128_255_byte_frames;
268 	u32 tx_256_511_bytes_frames;
269 	u32 tx_512_1023_byte_frames;
270 	u32 tx_1024_1518_byte_frames;
271 	u32 tx_greater_1518_byte_frames;
272 	u32 eee_tx_lpi_transitions;
273 	u32 eee_tx_lpi_time;
274 };
275 
276 struct lan78xx_statstage64 {
277 	u64 rx_fcs_errors;
278 	u64 rx_alignment_errors;
279 	u64 rx_fragment_errors;
280 	u64 rx_jabber_errors;
281 	u64 rx_undersize_frame_errors;
282 	u64 rx_oversize_frame_errors;
283 	u64 rx_dropped_frames;
284 	u64 rx_unicast_byte_count;
285 	u64 rx_broadcast_byte_count;
286 	u64 rx_multicast_byte_count;
287 	u64 rx_unicast_frames;
288 	u64 rx_broadcast_frames;
289 	u64 rx_multicast_frames;
290 	u64 rx_pause_frames;
291 	u64 rx_64_byte_frames;
292 	u64 rx_65_127_byte_frames;
293 	u64 rx_128_255_byte_frames;
294 	u64 rx_256_511_bytes_frames;
295 	u64 rx_512_1023_byte_frames;
296 	u64 rx_1024_1518_byte_frames;
297 	u64 rx_greater_1518_byte_frames;
298 	u64 eee_rx_lpi_transitions;
299 	u64 eee_rx_lpi_time;
300 	u64 tx_fcs_errors;
301 	u64 tx_excess_deferral_errors;
302 	u64 tx_carrier_errors;
303 	u64 tx_bad_byte_count;
304 	u64 tx_single_collisions;
305 	u64 tx_multiple_collisions;
306 	u64 tx_excessive_collision;
307 	u64 tx_late_collisions;
308 	u64 tx_unicast_byte_count;
309 	u64 tx_broadcast_byte_count;
310 	u64 tx_multicast_byte_count;
311 	u64 tx_unicast_frames;
312 	u64 tx_broadcast_frames;
313 	u64 tx_multicast_frames;
314 	u64 tx_pause_frames;
315 	u64 tx_64_byte_frames;
316 	u64 tx_65_127_byte_frames;
317 	u64 tx_128_255_byte_frames;
318 	u64 tx_256_511_bytes_frames;
319 	u64 tx_512_1023_byte_frames;
320 	u64 tx_1024_1518_byte_frames;
321 	u64 tx_greater_1518_byte_frames;
322 	u64 eee_tx_lpi_transitions;
323 	u64 eee_tx_lpi_time;
324 };
325 
326 static u32 lan78xx_regs[] = {
327 	ID_REV,
328 	INT_STS,
329 	HW_CFG,
330 	PMT_CTL,
331 	E2P_CMD,
332 	E2P_DATA,
333 	USB_STATUS,
334 	VLAN_TYPE,
335 	MAC_CR,
336 	MAC_RX,
337 	MAC_TX,
338 	FLOW,
339 	ERR_STS,
340 	MII_ACC,
341 	MII_DATA,
342 	EEE_TX_LPI_REQ_DLY,
343 	EEE_TW_TX_SYS,
344 	EEE_TX_LPI_REM_DLY,
345 	WUCSR
346 };
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
352 struct lan78xx_priv {
353 	struct lan78xx_net *dev;
354 	u32 rfe_ctl;
355 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
356 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
357 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
358 	struct mutex dataport_mutex; /* for dataport access */
359 	spinlock_t rfe_ctl_lock; /* for rfe register access */
360 	struct work_struct set_multicast;
361 	struct work_struct set_vlan;
362 	u32 wol;
363 };
364 
365 enum skb_state {
366 	illegal = 0,
367 	tx_start,
368 	tx_done,
369 	rx_start,
370 	rx_done,
371 	rx_cleanup,
372 	unlink_start
373 };
374 
375 struct skb_data {		/* skb->cb is one of these */
376 	struct urb *urb;
377 	struct lan78xx_net *dev;
378 	enum skb_state state;
379 	size_t length;
380 	int num_of_packet;
381 };
382 
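/* Editorial note: skb->cb is a 48-byte scratch area owned by whichever layer
 * currently holds the skb, so struct skb_data above must fit within it.
 */
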
383 struct usb_context {
384 	struct usb_ctrlrequest req;
385 	struct lan78xx_net *dev;
386 };
387 
388 #define EVENT_TX_HALT			0
389 #define EVENT_RX_HALT			1
390 #define EVENT_RX_MEMORY			2
391 #define EVENT_STS_SPLIT			3
392 #define EVENT_LINK_RESET		4
393 #define EVENT_RX_PAUSED			5
394 #define EVENT_DEV_WAKING		6
395 #define EVENT_DEV_ASLEEP		7
396 #define EVENT_DEV_OPEN			8
397 #define EVENT_STAT_UPDATE		9
398 #define EVENT_DEV_DISCONNECT		10
399 
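/* Editorial note: the EVENT_* values are bit numbers within
 * lan78xx_net::flags and are manipulated with set_bit()/test_bit();
 * see lan78xx_defer_kevent() below.
 */
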
400 struct statstage {
401 	struct mutex			access_lock;	/* for stats access */
402 	struct lan78xx_statstage	saved;
403 	struct lan78xx_statstage	rollover_count;
404 	struct lan78xx_statstage	rollover_max;
405 	struct lan78xx_statstage64	curr_stat;
406 };
407 
408 struct irq_domain_data {
409 	struct irq_domain	*irqdomain;
410 	unsigned int		phyirq;
411 	struct irq_chip		*irqchip;
412 	irq_flow_handler_t	irq_handler;
413 	u32			irqenable;
414 	struct mutex		irq_lock;		/* for irq bus access */
415 };
416 
417 struct lan78xx_net {
418 	struct net_device	*net;
419 	struct usb_device	*udev;
420 	struct usb_interface	*intf;
421 	void			*driver_priv;
422 
423 	unsigned int		tx_pend_data_len;
424 	size_t			n_tx_urbs;
425 	size_t			n_rx_urbs;
426 	size_t			tx_urb_size;
427 	size_t			rx_urb_size;
428 
429 	struct sk_buff_head	rxq_free;
430 	struct sk_buff_head	rxq;
431 	struct sk_buff_head	rxq_done;
432 	struct sk_buff_head	rxq_overflow;
433 	struct sk_buff_head	txq_free;
434 	struct sk_buff_head	txq;
435 	struct sk_buff_head	txq_pend;
436 
437 	struct napi_struct	napi;
438 
439 	struct delayed_work	wq;
440 
441 	int			msg_enable;
442 
443 	struct urb		*urb_intr;
444 	struct usb_anchor	deferred;
445 
446 	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
447 	struct mutex		phy_mutex; /* for phy access */
448 	unsigned int		pipe_in, pipe_out, pipe_intr;
449 
450 	unsigned int		bulk_in_delay;
451 	unsigned int		burst_cap;
452 
453 	unsigned long		flags;
454 
455 	wait_queue_head_t	*wait;
456 	unsigned char		suspend_count;
457 
458 	unsigned int		maxpacket;
459 	struct timer_list	stat_monitor;
460 
461 	unsigned long		data[5];
462 
463 	int			link_on;
464 	u8			mdix_ctrl;
465 
466 	u32			chipid;
467 	u32			chiprev;
468 	struct mii_bus		*mdiobus;
469 	phy_interface_t		interface;
470 
471 	int			fc_autoneg;
472 	u8			fc_request_control;
473 
474 	int			delta;
475 	struct statstage	stats;
476 
477 	struct irq_domain_data	domain_data;
478 };
479 
480 /* define external phy id */
481 #define	PHY_LAN8835			(0x0007C130)
482 #define	PHY_KSZ9031RNX			(0x00221620)
483 
484 /* use ethtool to change the level for any given device */
485 static int msg_level = -1;
486 module_param(msg_level, int, 0);
487 MODULE_PARM_DESC(msg_level, "Override default message level");
488 
489 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
490 {
491 	if (skb_queue_empty(buf_pool))
492 		return NULL;
493 
494 	return skb_dequeue(buf_pool);
495 }
496 
497 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
498 				struct sk_buff *buf)
499 {
500 	buf->data = buf->head;
501 	skb_reset_tail_pointer(buf);
502 
503 	buf->len = 0;
504 	buf->data_len = 0;
505 
506 	skb_queue_tail(buf_pool, buf);
507 }
508 
509 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
510 {
511 	struct skb_data *entry;
512 	struct sk_buff *buf;
513 
514 	while (!skb_queue_empty(buf_pool)) {
515 		buf = skb_dequeue(buf_pool);
516 		if (buf) {
517 			entry = (struct skb_data *)buf->cb;
518 			usb_free_urb(entry->urb);
519 			dev_kfree_skb_any(buf);
520 		}
521 	}
522 }
523 
524 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
525 				  size_t n_urbs, size_t urb_size,
526 				  struct lan78xx_net *dev)
527 {
528 	struct skb_data *entry;
529 	struct sk_buff *buf;
530 	struct urb *urb;
531 	int i;
532 
533 	skb_queue_head_init(buf_pool);
534 
535 	for (i = 0; i < n_urbs; i++) {
536 		buf = alloc_skb(urb_size, GFP_ATOMIC);
537 		if (!buf)
538 			goto error;
539 
540 		if (skb_linearize(buf) != 0) {
541 			dev_kfree_skb_any(buf);
542 			goto error;
543 		}
544 
545 		urb = usb_alloc_urb(0, GFP_ATOMIC);
546 		if (!urb) {
547 			dev_kfree_skb_any(buf);
548 			goto error;
549 		}
550 
551 		entry = (struct skb_data *)buf->cb;
552 		entry->urb = urb;
553 		entry->dev = dev;
554 		entry->length = 0;
555 		entry->num_of_packet = 0;
556 
557 		skb_queue_tail(buf_pool, buf);
558 	}
559 
560 	return 0;
561 
562 error:
563 	lan78xx_free_buf_pool(buf_pool);
564 
565 	return -ENOMEM;
566 }
567 
568 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
569 {
570 	return lan78xx_get_buf(&dev->rxq_free);
571 }
572 
573 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
574 				   struct sk_buff *rx_buf)
575 {
576 	lan78xx_release_buf(&dev->rxq_free, rx_buf);
577 }
578 
579 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
580 {
581 	lan78xx_free_buf_pool(&dev->rxq_free);
582 }
583 
584 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
585 {
586 	return lan78xx_alloc_buf_pool(&dev->rxq_free,
587 				      dev->n_rx_urbs, dev->rx_urb_size, dev);
588 }
589 
590 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
591 {
592 	return lan78xx_get_buf(&dev->txq_free);
593 }
594 
595 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
596 				   struct sk_buff *tx_buf)
597 {
598 	lan78xx_release_buf(&dev->txq_free, tx_buf);
599 }
600 
601 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
602 {
603 	lan78xx_free_buf_pool(&dev->txq_free);
604 }
605 
606 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
607 {
608 	return lan78xx_alloc_buf_pool(&dev->txq_free,
609 				      dev->n_tx_urbs, dev->tx_urb_size, dev);
610 }
611 
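/* Editorial note: the 4-byte transfer buffer in the register accessors below
 * is heap-allocated because usb_control_msg() requires a DMA-able buffer;
 * on-stack buffers must not be passed to USB transfer functions.
 */
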
612 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
613 {
614 	u32 *buf;
615 	int ret;
616 
617 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
618 		return -ENODEV;
619 
620 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
621 	if (!buf)
622 		return -ENOMEM;
623 
624 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
625 			      USB_VENDOR_REQUEST_READ_REGISTER,
626 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
627 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
628 	if (likely(ret >= 0)) {
629 		le32_to_cpus(buf);
630 		*data = *buf;
631 	} else if (net_ratelimit()) {
632 		netdev_warn(dev->net,
633 			    "Failed to read register index 0x%08x. ret = %d",
634 			    index, ret);
635 	}
636 
637 	kfree(buf);
638 
639 	return ret;
640 }
641 
642 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
643 {
644 	u32 *buf;
645 	int ret;
646 
647 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
648 		return -ENODEV;
649 
650 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
651 	if (!buf)
652 		return -ENOMEM;
653 
654 	*buf = data;
655 	cpu_to_le32s(buf);
656 
657 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
658 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
659 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
660 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
661 	if (unlikely(ret < 0) &&
662 	    net_ratelimit()) {
663 		netdev_warn(dev->net,
664 			    "Failed to write register index 0x%08x. ret = %d",
665 			    index, ret);
666 	}
667 
668 	kfree(buf);
669 
670 	return ret;
671 }
672 
673 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
674 			      u32 data)
675 {
676 	int ret;
677 	u32 buf;
678 
679 	ret = lan78xx_read_reg(dev, reg, &buf);
680 	if (ret < 0)
681 		return ret;
682 
683 	buf &= ~mask;
684 	buf |= (mask & data);
685 
686 	ret = lan78xx_write_reg(dev, reg, buf);
687 	if (ret < 0)
688 		return ret;
689 
690 	return 0;
691 }
692 
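/* Editorial illustration (hypothetical call, not from the original file):
 * lan78xx_update_reg(dev, MAC_RX, MAC_RX_RXEN_, 0) would perform a
 * read-modify-write that clears only the RXEN bit of MAC_RX.
 */
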
693 static int lan78xx_read_stats(struct lan78xx_net *dev,
694 			      struct lan78xx_statstage *data)
695 {
696 	int ret = 0;
697 	int i;
698 	struct lan78xx_statstage *stats;
699 	u32 *src;
700 	u32 *dst;
701 
702 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
703 	if (!stats)
704 		return -ENOMEM;
705 
706 	ret = usb_control_msg(dev->udev,
707 			      usb_rcvctrlpipe(dev->udev, 0),
708 			      USB_VENDOR_REQUEST_GET_STATS,
709 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
710 			      0,
711 			      0,
712 			      (void *)stats,
713 			      sizeof(*stats),
714 			      USB_CTRL_SET_TIMEOUT);
715 	if (likely(ret >= 0)) {
716 		src = (u32 *)stats;
717 		dst = (u32 *)data;
718 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
719 			le32_to_cpus(&src[i]);
720 			dst[i] = src[i];
721 		}
722 	} else {
723 		netdev_warn(dev->net,
724 			    "Failed to read stat ret = %d", ret);
725 	}
726 
727 	kfree(stats);
728 
729 	return ret;
730 }
731 
732 #define check_counter_rollover(struct1, dev_stats, member)		\
733 	do {								\
734 		if ((struct1)->member < (dev_stats).saved.member)	\
735 			(dev_stats).rollover_count.member++;		\
736 	} while (0)
737 
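/* Editorial note: lan78xx_update_stats() later reconstructs each 64-bit
 * counter as hw_value + rollover_count * (rollover_max + 1), walking the
 * stat structures as arrays of u32/u64 words.
 */
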
738 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
739 					struct lan78xx_statstage *stats)
740 {
741 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
742 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
743 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
744 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
745 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
746 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
747 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
748 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
749 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
750 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
751 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
752 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
753 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
754 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
755 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
756 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
757 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
758 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
759 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
760 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
761 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
762 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
763 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
764 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
765 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
766 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
767 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
768 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
769 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
770 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
771 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
772 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
773 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
774 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
775 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
776 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
777 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
778 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
779 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
780 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
781 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
782 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
783 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
784 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
785 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
786 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
787 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
788 
789 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
790 }
791 
792 static void lan78xx_update_stats(struct lan78xx_net *dev)
793 {
794 	u32 *p, *count, *max;
795 	u64 *data;
796 	int i;
797 	struct lan78xx_statstage lan78xx_stats;
798 
799 	if (usb_autopm_get_interface(dev->intf) < 0)
800 		return;
801 
802 	p = (u32 *)&lan78xx_stats;
803 	count = (u32 *)&dev->stats.rollover_count;
804 	max = (u32 *)&dev->stats.rollover_max;
805 	data = (u64 *)&dev->stats.curr_stat;
806 
807 	mutex_lock(&dev->stats.access_lock);
808 
809 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
810 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
811 
812 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
813 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
814 
815 	mutex_unlock(&dev->stats.access_lock);
816 
817 	usb_autopm_put_interface(dev->intf);
818 }
819 
820 /* Loop until the read completes or times out. Called with phy_mutex held. */
821 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
822 {
823 	unsigned long start_time = jiffies;
824 	u32 val;
825 	int ret;
826 
827 	do {
828 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
829 		if (unlikely(ret < 0))
830 			return -EIO;
831 
832 		if (!(val & MII_ACC_MII_BUSY_))
833 			return 0;
834 	} while (!time_after(jiffies, start_time + HZ));
835 
836 	return -EIO;
837 }
838 
839 static inline u32 mii_access(int id, int index, int read)
840 {
841 	u32 ret;
842 
843 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
844 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
845 	if (read)
846 		ret |= MII_ACC_MII_READ_;
847 	else
848 		ret |= MII_ACC_MII_WRITE_;
849 	ret |= MII_ACC_MII_BUSY_;
850 
851 	return ret;
852 }
853 
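/* Editorial illustration (hypothetical call): mii_access(phy_id, MII_BMSR,
 * MII_READ) composes a read command for the PHY's basic status register;
 * the caller writes it to MII_ACC and then polls MII_ACC_MII_BUSY_.
 */
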
854 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
855 {
856 	unsigned long start_time = jiffies;
857 	u32 val;
858 	int ret;
859 
860 	do {
861 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
862 		if (unlikely(ret < 0))
863 			return -EIO;
864 
865 		if (!(val & E2P_CMD_EPC_BUSY_) ||
866 		    (val & E2P_CMD_EPC_TIMEOUT_))
867 			break;
868 		usleep_range(40, 100);
869 	} while (!time_after(jiffies, start_time + HZ));
870 
871 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
872 		netdev_warn(dev->net, "EEPROM read operation timeout");
873 		return -EIO;
874 	}
875 
876 	return 0;
877 }
878 
879 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
880 {
881 	unsigned long start_time = jiffies;
882 	u32 val;
883 	int ret;
884 
885 	do {
886 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
887 		if (unlikely(ret < 0))
888 			return -EIO;
889 
890 		if (!(val & E2P_CMD_EPC_BUSY_))
891 			return 0;
892 
893 		usleep_range(40, 100);
894 	} while (!time_after(jiffies, start_time + HZ));
895 
896 	netdev_warn(dev->net, "EEPROM is busy");
897 	return -EIO;
898 }
899 
900 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
901 				   u32 length, u8 *data)
902 {
903 	u32 val;
904 	u32 saved;
905 	int i, ret;
906 	int retval;
907 
908 	/* Depending on the chip, some EEPROM pins are muxed with the LED
909 	 * function. Disable and restore the LED function to access the EEPROM.
910 	 */
911 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
912 	saved = val;
913 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
914 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
915 		ret = lan78xx_write_reg(dev, HW_CFG, val);
916 	}
917 
918 	retval = lan78xx_eeprom_confirm_not_busy(dev);
919 	if (retval)
920 		return retval;
921 
922 	for (i = 0; i < length; i++) {
923 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
924 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
925 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
926 		if (unlikely(ret < 0)) {
927 			retval = -EIO;
928 			goto exit;
929 		}
930 
931 		retval = lan78xx_wait_eeprom(dev);
932 		if (retval < 0)
933 			goto exit;
934 
935 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
936 		if (unlikely(ret < 0)) {
937 			retval = -EIO;
938 			goto exit;
939 		}
940 
941 		data[i] = val & 0xFF;
942 		offset++;
943 	}
944 
945 	retval = 0;
946 exit:
947 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
948 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
949 
950 	return retval;
951 }
952 
953 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
954 			       u32 length, u8 *data)
955 {
956 	u8 sig;
957 	int ret;
958 
959 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
960 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
961 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
962 	else
963 		ret = -EINVAL;
964 
965 	return ret;
966 }
967 
968 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
969 				    u32 length, u8 *data)
970 {
971 	u32 val;
972 	u32 saved;
973 	int i, ret;
974 	int retval;
975 
976 	/* Depending on the chip, some EEPROM pins are muxed with the LED
977 	 * function. Disable and restore the LED function to access the EEPROM.
978 	 */
979 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
980 	saved = val;
981 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
982 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
983 		ret = lan78xx_write_reg(dev, HW_CFG, val);
984 	}
985 
986 	retval = lan78xx_eeprom_confirm_not_busy(dev);
987 	if (retval)
988 		goto exit;
989 
990 	/* Issue write/erase enable command */
991 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
992 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
993 	if (unlikely(ret < 0)) {
994 		retval = -EIO;
995 		goto exit;
996 	}
997 
998 	retval = lan78xx_wait_eeprom(dev);
999 	if (retval < 0)
1000 		goto exit;
1001 
1002 	for (i = 0; i < length; i++) {
1003 		/* Fill data register */
1004 		val = data[i];
1005 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1006 		if (ret < 0) {
1007 			retval = -EIO;
1008 			goto exit;
1009 		}
1010 
1011 		/* Send "write" command */
1012 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1013 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1014 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1015 		if (ret < 0) {
1016 			retval = -EIO;
1017 			goto exit;
1018 		}
1019 
1020 		retval = lan78xx_wait_eeprom(dev);
1021 		if (retval < 0)
1022 			goto exit;
1023 
1024 		offset++;
1025 	}
1026 
1027 	retval = 0;
1028 exit:
1029 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1030 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
1031 
1032 	return retval;
1033 }
1034 
1035 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1036 				u32 length, u8 *data)
1037 {
1038 	int i;
1039 	u32 buf;
1040 	unsigned long timeout;
1041 
1042 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1043 
1044 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1045 		/* clear it and wait for it to be cleared */
1046 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1047 
1048 		timeout = jiffies + HZ;
1049 		do {
1050 			usleep_range(1, 10);
1051 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1052 			if (time_after(jiffies, timeout)) {
1053 				netdev_warn(dev->net,
1054 					    "timeout on OTP_PWR_DN");
1055 				return -EIO;
1056 			}
1057 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1058 	}
1059 
1060 	for (i = 0; i < length; i++) {
1061 		lan78xx_write_reg(dev, OTP_ADDR1,
1062 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1063 		lan78xx_write_reg(dev, OTP_ADDR2,
1064 				  ((offset + i) & OTP_ADDR2_10_3));
1065 
1066 		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1067 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1068 
1069 		timeout = jiffies + HZ;
1070 		do {
1071 			udelay(1);
1072 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1073 			if (time_after(jiffies, timeout)) {
1074 				netdev_warn(dev->net,
1075 					    "timeout on OTP_STATUS");
1076 				return -EIO;
1077 			}
1078 		} while (buf & OTP_STATUS_BUSY_);
1079 
1080 		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1081 
1082 		data[i] = (u8)(buf & 0xFF);
1083 	}
1084 
1085 	return 0;
1086 }
1087 
1088 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1089 				 u32 length, u8 *data)
1090 {
1091 	int i;
1092 	u32 buf;
1093 	unsigned long timeout;
1094 
1095 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1096 
1097 	if (buf & OTP_PWR_DN_PWRDN_N_) {
1098 		/* clear it and wait for it to be cleared */
1099 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1100 
1101 		timeout = jiffies + HZ;
1102 		do {
1103 			udelay(1);
1104 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1105 			if (time_after(jiffies, timeout)) {
1106 				netdev_warn(dev->net,
1107 					    "timeout on OTP_PWR_DN completion");
1108 				return -EIO;
1109 			}
1110 		} while (buf & OTP_PWR_DN_PWRDN_N_);
1111 	}
1112 
1113 	/* set to BYTE program mode */
1114 	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1115 
1116 	for (i = 0; i < length; i++) {
1117 		lan78xx_write_reg(dev, OTP_ADDR1,
1118 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
1119 		lan78xx_write_reg(dev, OTP_ADDR2,
1120 				  ((offset + i) & OTP_ADDR2_10_3));
1121 		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1122 		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1123 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1124 
1125 		timeout = jiffies + HZ;
1126 		do {
1127 			udelay(1);
1128 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
1129 			if (time_after(jiffies, timeout)) {
1130 				netdev_warn(dev->net,
1131 					    "Timeout on OTP_STATUS completion");
1132 				return -EIO;
1133 			}
1134 		} while (buf & OTP_STATUS_BUSY_);
1135 	}
1136 
1137 	return 0;
1138 }
1139 
1140 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1141 			    u32 length, u8 *data)
1142 {
1143 	u8 sig;
1144 	int ret;
1145 
1146 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1147 
1148 	if (ret == 0) {
1149 		if (sig == OTP_INDICATOR_2)
1150 			offset += 0x100;
1151 		else if (sig != OTP_INDICATOR_1)
1152 			ret = -EINVAL;
1153 		if (!ret)
1154 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1155 	}
1156 
1157 	return ret;
1158 }
1159 
1160 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1161 {
1162 	int i, ret;
1163 
1164 	for (i = 0; i < 100; i++) {
1165 		u32 dp_sel;
1166 
1167 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1168 		if (unlikely(ret < 0))
1169 			return -EIO;
1170 
1171 		if (dp_sel & DP_SEL_DPRDY_)
1172 			return 0;
1173 
1174 		usleep_range(40, 100);
1175 	}
1176 
1177 	netdev_warn(dev->net, "%s timed out", __func__);
1178 
1179 	return -EIO;
1180 }
1181 
1182 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1183 				  u32 addr, u32 length, u32 *buf)
1184 {
1185 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1186 	u32 dp_sel;
1187 	int i, ret;
1188 
1189 	if (usb_autopm_get_interface(dev->intf) < 0)
1190 		return 0;
1191 
1192 	mutex_lock(&pdata->dataport_mutex);
1193 
1194 	ret = lan78xx_dataport_wait_not_busy(dev);
1195 	if (ret < 0)
1196 		goto done;
1197 
1198 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1199 
1200 	dp_sel &= ~DP_SEL_RSEL_MASK_;
1201 	dp_sel |= ram_select;
1202 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1203 
1204 	for (i = 0; i < length; i++) {
1205 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1206 
1207 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1208 
1209 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1210 
1211 		ret = lan78xx_dataport_wait_not_busy(dev);
1212 		if (ret < 0)
1213 			goto done;
1214 	}
1215 
1216 done:
1217 	mutex_unlock(&pdata->dataport_mutex);
1218 	usb_autopm_put_interface(dev->intf);
1219 
1220 	return ret;
1221 }
1222 
1223 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1224 				    int index, u8 addr[ETH_ALEN])
1225 {
1226 	u32 temp;
1227 
1228 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1229 		temp = addr[3];
1230 		temp = addr[2] | (temp << 8);
1231 		temp = addr[1] | (temp << 8);
1232 		temp = addr[0] | (temp << 8);
1233 		pdata->pfilter_table[index][1] = temp;
1234 		temp = addr[5];
1235 		temp = addr[4] | (temp << 8);
1236 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1237 		pdata->pfilter_table[index][0] = temp;
1238 	}
1239 }
1240 
1241 /* returns hash bit number for given MAC address */
1242 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1243 {
1244 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1245 }
1246 
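/* Editorial note: the CRC of the address is reduced to a 9-bit value
 * (0..511); lan78xx_set_multicast() records it as bit (bitnum % 32) of
 * mchash_table[bitnum / 32].
 */
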
1247 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1248 {
1249 	struct lan78xx_priv *pdata =
1250 			container_of(param, struct lan78xx_priv, set_multicast);
1251 	struct lan78xx_net *dev = pdata->dev;
1252 	int i;
1253 
1254 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1255 		  pdata->rfe_ctl);
1256 
1257 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1258 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1259 
1260 	for (i = 1; i < NUM_OF_MAF; i++) {
1261 		lan78xx_write_reg(dev, MAF_HI(i), 0);
1262 		lan78xx_write_reg(dev, MAF_LO(i),
1263 				  pdata->pfilter_table[i][1]);
1264 		lan78xx_write_reg(dev, MAF_HI(i),
1265 				  pdata->pfilter_table[i][0]);
1266 	}
1267 
1268 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1269 }
1270 
1271 static void lan78xx_set_multicast(struct net_device *netdev)
1272 {
1273 	struct lan78xx_net *dev = netdev_priv(netdev);
1274 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1275 	unsigned long flags;
1276 	int i;
1277 
1278 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1279 
1280 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1281 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1282 
1283 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1284 		pdata->mchash_table[i] = 0;
1285 
1286 	/* pfilter_table[0] holds the device's own HW address */
1287 	for (i = 1; i < NUM_OF_MAF; i++) {
1288 		pdata->pfilter_table[i][0] = 0;
1289 		pdata->pfilter_table[i][1] = 0;
1290 	}
1291 
1292 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1293 
1294 	if (dev->net->flags & IFF_PROMISC) {
1295 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1296 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1297 	} else {
1298 		if (dev->net->flags & IFF_ALLMULTI) {
1299 			netif_dbg(dev, drv, dev->net,
1300 				  "receive all multicast enabled");
1301 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1302 		}
1303 	}
1304 
1305 	if (netdev_mc_count(dev->net)) {
1306 		struct netdev_hw_addr *ha;
1307 		int i;
1308 
1309 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1310 
1311 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1312 
1313 		i = 1;
1314 		netdev_for_each_mc_addr(ha, netdev) {
1315 			/* set first 32 into Perfect Filter */
1316 			if (i < 33) {
1317 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1318 			} else {
1319 				u32 bitnum = lan78xx_hash(ha->addr);
1320 
1321 				pdata->mchash_table[bitnum / 32] |=
1322 							(1 << (bitnum % 32));
1323 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1324 			}
1325 			i++;
1326 		}
1327 	}
1328 
1329 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1330 
1331 	/* defer register writes to a sleepable context */
1332 	schedule_work(&pdata->set_multicast);
1333 }
1334 
1335 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1336 				      u16 lcladv, u16 rmtadv)
1337 {
1338 	u32 flow = 0, fct_flow = 0;
1339 	u8 cap;
1340 
1341 	if (dev->fc_autoneg)
1342 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1343 	else
1344 		cap = dev->fc_request_control;
1345 
1346 	if (cap & FLOW_CTRL_TX)
1347 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1348 
1349 	if (cap & FLOW_CTRL_RX)
1350 		flow |= FLOW_CR_RX_FCEN_;
1351 
1352 	if (dev->udev->speed == USB_SPEED_SUPER)
1353 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1354 	else if (dev->udev->speed == USB_SPEED_HIGH)
1355 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1356 
1357 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1358 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1359 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1360 
1361 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1362 
1363 	/* threshold value should be set before enabling flow */
1364 	lan78xx_write_reg(dev, FLOW, flow);
1365 
1366 	return 0;
1367 }
1368 
1369 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1370 
1371 static int lan78xx_mac_reset(struct lan78xx_net *dev)
1372 {
1373 	unsigned long start_time = jiffies;
1374 	u32 val;
1375 	int ret;
1376 
1377 	mutex_lock(&dev->phy_mutex);
1378 
1379 	/* Resetting the device while there is activity on the MDIO
1380 	 * bus can result in the MAC interface locking up and not
1381 	 * completing register access transactions.
1382 	 */
1383 	ret = lan78xx_phy_wait_not_busy(dev);
1384 	if (ret < 0)
1385 		goto done;
1386 
1387 	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1388 	if (ret < 0)
1389 		goto done;
1390 
1391 	val |= MAC_CR_RST_;
1392 	ret = lan78xx_write_reg(dev, MAC_CR, val);
1393 	if (ret < 0)
1394 		goto done;
1395 
1396 	/* Wait for the reset to complete before allowing any further
1397 	 * MAC register accesses otherwise the MAC may lock up.
1398 	 */
1399 	do {
1400 		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1401 		if (ret < 0)
1402 			goto done;
1403 
1404 		if (!(val & MAC_CR_RST_)) {
1405 			ret = 0;
1406 			goto done;
1407 		}
1408 	} while (!time_after(jiffies, start_time + HZ));
1409 
1410 	ret = -ETIMEDOUT;
1411 done:
1412 	mutex_unlock(&dev->phy_mutex);
1413 
1414 	return ret;
1415 }
1416 
1417 static int lan78xx_link_reset(struct lan78xx_net *dev)
1418 {
1419 	struct phy_device *phydev = dev->net->phydev;
1420 	struct ethtool_link_ksettings ecmd;
1421 	int ladv, radv, ret, link;
1422 	u32 buf;
1423 
1424 	/* clear LAN78xx interrupt status */
1425 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1426 	if (unlikely(ret < 0))
1427 		return ret;
1428 
1429 	mutex_lock(&phydev->lock);
1430 	phy_read_status(phydev);
1431 	link = phydev->link;
1432 	mutex_unlock(&phydev->lock);
1433 
1434 	if (!link && dev->link_on) {
1435 		dev->link_on = false;
1436 
1437 		/* reset MAC */
1438 		ret = lan78xx_mac_reset(dev);
1439 		if (ret < 0)
1440 			return ret;
1441 
1442 		del_timer(&dev->stat_monitor);
1443 	} else if (link && !dev->link_on) {
1444 		dev->link_on = true;
1445 
1446 		phy_ethtool_ksettings_get(phydev, &ecmd);
1447 
1448 		if (dev->udev->speed == USB_SPEED_SUPER) {
1449 			if (ecmd.base.speed == 1000) {
1450 				/* disable U2 */
1451 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1452 				if (ret < 0)
1453 					return ret;
1454 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1455 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1456 				if (ret < 0)
1457 					return ret;
1458 				/* enable U1 */
1459 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1460 				if (ret < 0)
1461 					return ret;
1462 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1463 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1464 				if (ret < 0)
1465 					return ret;
1466 			} else {
1467 				/* enable U1 & U2 */
1468 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1469 				if (ret < 0)
1470 					return ret;
1471 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1472 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1473 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1474 				if (ret < 0)
1475 					return ret;
1476 			}
1477 		}
1478 
1479 		ladv = phy_read(phydev, MII_ADVERTISE);
1480 		if (ladv < 0)
1481 			return ladv;
1482 
1483 		radv = phy_read(phydev, MII_LPA);
1484 		if (radv < 0)
1485 			return radv;
1486 
1487 		netif_dbg(dev, link, dev->net,
1488 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1489 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1490 
1491 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1492 						 radv);
1493 		if (ret < 0)
1494 			return ret;
1495 
1496 		if (!timer_pending(&dev->stat_monitor)) {
1497 			dev->delta = 1;
1498 			mod_timer(&dev->stat_monitor,
1499 				  jiffies + STAT_UPDATE_TIMER);
1500 		}
1501 
1502 		lan78xx_rx_urb_submit_all(dev);
1503 
1504 		local_bh_disable();
1505 		napi_schedule(&dev->napi);
1506 		local_bh_enable();
1507 	}
1508 
1509 	return 0;
1510 }
1511 
1512 /* Some work can't be done in tasklets, so we use keventd.
1513  *
1514  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1515  * but tasklet_schedule() doesn't. Hope the failure is rare.
1516  */
1517 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1518 {
1519 	set_bit(work, &dev->flags);
1520 	if (!schedule_delayed_work(&dev->wq, 0))
1521 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1522 }
1523 
1524 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1525 {
1526 	u32 intdata;
1527 
1528 	if (urb->actual_length != 4) {
1529 		netdev_warn(dev->net,
1530 			    "unexpected urb length %d", urb->actual_length);
1531 		return;
1532 	}
1533 
1534 	intdata = get_unaligned_le32(urb->transfer_buffer);
1535 
1536 	if (intdata & INT_ENP_PHY_INT) {
1537 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1538 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1539 
1540 		if (dev->domain_data.phyirq > 0)
1541 			generic_handle_irq_safe(dev->domain_data.phyirq);
1542 	} else {
1543 		netdev_warn(dev->net,
1544 			    "unexpected interrupt: 0x%08x\n", intdata);
1545 	}
1546 }
1547 
1548 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1549 {
1550 	return MAX_EEPROM_SIZE;
1551 }
1552 
1553 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1554 				      struct ethtool_eeprom *ee, u8 *data)
1555 {
1556 	struct lan78xx_net *dev = netdev_priv(netdev);
1557 	int ret;
1558 
1559 	ret = usb_autopm_get_interface(dev->intf);
1560 	if (ret)
1561 		return ret;
1562 
1563 	ee->magic = LAN78XX_EEPROM_MAGIC;
1564 
1565 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1566 
1567 	usb_autopm_put_interface(dev->intf);
1568 
1569 	return ret;
1570 }
1571 
1572 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1573 				      struct ethtool_eeprom *ee, u8 *data)
1574 {
1575 	struct lan78xx_net *dev = netdev_priv(netdev);
1576 	int ret;
1577 
1578 	ret = usb_autopm_get_interface(dev->intf);
1579 	if (ret)
1580 		return ret;
1581 
1582 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1583 	 * to load data from EEPROM
1584 	 */
1585 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1586 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1587 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1588 		 (ee->offset == 0) &&
1589 		 (ee->len == 512) &&
1590 		 (data[0] == OTP_INDICATOR_1))
1591 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1592 
1593 	usb_autopm_put_interface(dev->intf);
1594 
1595 	return ret;
1596 }
1597 
1598 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1599 				u8 *data)
1600 {
1601 	if (stringset == ETH_SS_STATS)
1602 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1603 }
1604 
1605 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1606 {
1607 	if (sset == ETH_SS_STATS)
1608 		return ARRAY_SIZE(lan78xx_gstrings);
1609 	else
1610 		return -EOPNOTSUPP;
1611 }
1612 
1613 static void lan78xx_get_stats(struct net_device *netdev,
1614 			      struct ethtool_stats *stats, u64 *data)
1615 {
1616 	struct lan78xx_net *dev = netdev_priv(netdev);
1617 
1618 	lan78xx_update_stats(dev);
1619 
1620 	mutex_lock(&dev->stats.access_lock);
1621 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1622 	mutex_unlock(&dev->stats.access_lock);
1623 }
1624 
1625 static void lan78xx_get_wol(struct net_device *netdev,
1626 			    struct ethtool_wolinfo *wol)
1627 {
1628 	struct lan78xx_net *dev = netdev_priv(netdev);
1629 	int ret;
1630 	u32 buf;
1631 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1632 
1633 	if (usb_autopm_get_interface(dev->intf) < 0)
1634 		return;
1635 
1636 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1637 	if (unlikely(ret < 0)) {
1638 		wol->supported = 0;
1639 		wol->wolopts = 0;
1640 	} else {
1641 		if (buf & USB_CFG_RMT_WKP_) {
1642 			wol->supported = WAKE_ALL;
1643 			wol->wolopts = pdata->wol;
1644 		} else {
1645 			wol->supported = 0;
1646 			wol->wolopts = 0;
1647 		}
1648 	}
1649 
1650 	usb_autopm_put_interface(dev->intf);
1651 }
1652 
1653 static int lan78xx_set_wol(struct net_device *netdev,
1654 			   struct ethtool_wolinfo *wol)
1655 {
1656 	struct lan78xx_net *dev = netdev_priv(netdev);
1657 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1658 	int ret;
1659 
1660 	if (wol->wolopts & ~WAKE_ALL)
1661 		return -EINVAL;
1662 
1663 	ret = usb_autopm_get_interface(dev->intf);
1664 	if (ret < 0)
1665 		return ret;
1666 
1667 	pdata->wol = wol->wolopts;
1668 
1669 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1670 
1671 	phy_ethtool_set_wol(netdev->phydev, wol);
1672 
1673 	usb_autopm_put_interface(dev->intf);
1674 
1675 	return ret;
1676 }
1677 
1678 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1679 {
1680 	struct lan78xx_net *dev = netdev_priv(net);
1681 	struct phy_device *phydev = net->phydev;
1682 	int ret;
1683 	u32 buf;
1684 
1685 	ret = usb_autopm_get_interface(dev->intf);
1686 	if (ret < 0)
1687 		return ret;
1688 
1689 	ret = phy_ethtool_get_eee(phydev, edata);
1690 	if (ret < 0)
1691 		goto exit;
1692 
1693 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1694 	if (buf & MAC_CR_EEE_EN_) {
1695 		edata->eee_enabled = true;
1696 		edata->eee_active = !!(edata->advertised &
1697 				       edata->lp_advertised);
1698 		edata->tx_lpi_enabled = true;
1699 		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same microsecond units */
1700 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1701 		edata->tx_lpi_timer = buf;
1702 	} else {
1703 		edata->eee_enabled = false;
1704 		edata->eee_active = false;
1705 		edata->tx_lpi_enabled = false;
1706 		edata->tx_lpi_timer = 0;
1707 	}
1708 
1709 	ret = 0;
1710 exit:
1711 	usb_autopm_put_interface(dev->intf);
1712 
1713 	return ret;
1714 }
1715 
1716 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1717 {
1718 	struct lan78xx_net *dev = netdev_priv(net);
1719 	int ret;
1720 	u32 buf;
1721 
1722 	ret = usb_autopm_get_interface(dev->intf);
1723 	if (ret < 0)
1724 		return ret;
1725 
1726 	if (edata->eee_enabled) {
1727 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1728 		buf |= MAC_CR_EEE_EN_;
1729 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1730 
1731 		phy_ethtool_set_eee(net->phydev, edata);
1732 
1733 		buf = (u32)edata->tx_lpi_timer;
1734 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1735 	} else {
1736 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1737 		buf &= ~MAC_CR_EEE_EN_;
1738 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1739 	}
1740 
1741 	usb_autopm_put_interface(dev->intf);
1742 
1743 	return 0;
1744 }
1745 
1746 static u32 lan78xx_get_link(struct net_device *net)
1747 {
1748 	u32 link;
1749 
1750 	mutex_lock(&net->phydev->lock);
1751 	phy_read_status(net->phydev);
1752 	link = net->phydev->link;
1753 	mutex_unlock(&net->phydev->lock);
1754 
1755 	return link;
1756 }
1757 
1758 static void lan78xx_get_drvinfo(struct net_device *net,
1759 				struct ethtool_drvinfo *info)
1760 {
1761 	struct lan78xx_net *dev = netdev_priv(net);
1762 
1763 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1764 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1765 }
1766 
1767 static u32 lan78xx_get_msglevel(struct net_device *net)
1768 {
1769 	struct lan78xx_net *dev = netdev_priv(net);
1770 
1771 	return dev->msg_enable;
1772 }
1773 
1774 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1775 {
1776 	struct lan78xx_net *dev = netdev_priv(net);
1777 
1778 	dev->msg_enable = level;
1779 }
1780 
1781 static int lan78xx_get_link_ksettings(struct net_device *net,
1782 				      struct ethtool_link_ksettings *cmd)
1783 {
1784 	struct lan78xx_net *dev = netdev_priv(net);
1785 	struct phy_device *phydev = net->phydev;
1786 	int ret;
1787 
1788 	ret = usb_autopm_get_interface(dev->intf);
1789 	if (ret < 0)
1790 		return ret;
1791 
1792 	phy_ethtool_ksettings_get(phydev, cmd);
1793 
1794 	usb_autopm_put_interface(dev->intf);
1795 
1796 	return ret;
1797 }
1798 
1799 static int lan78xx_set_link_ksettings(struct net_device *net,
1800 				      const struct ethtool_link_ksettings *cmd)
1801 {
1802 	struct lan78xx_net *dev = netdev_priv(net);
1803 	struct phy_device *phydev = net->phydev;
1804 	int ret = 0;
1805 	int temp;
1806 
1807 	ret = usb_autopm_get_interface(dev->intf);
1808 	if (ret < 0)
1809 		return ret;
1810 
1811 	/* change speed & duplex */
1812 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1813 
1814 	if (!cmd->base.autoneg) {
1815 		/* force link down */
1816 		temp = phy_read(phydev, MII_BMCR);
1817 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1818 		mdelay(1);
1819 		phy_write(phydev, MII_BMCR, temp);
1820 	}
1821 
1822 	usb_autopm_put_interface(dev->intf);
1823 
1824 	return ret;
1825 }
1826 
1827 static void lan78xx_get_pause(struct net_device *net,
1828 			      struct ethtool_pauseparam *pause)
1829 {
1830 	struct lan78xx_net *dev = netdev_priv(net);
1831 	struct phy_device *phydev = net->phydev;
1832 	struct ethtool_link_ksettings ecmd;
1833 
1834 	phy_ethtool_ksettings_get(phydev, &ecmd);
1835 
1836 	pause->autoneg = dev->fc_autoneg;
1837 
1838 	if (dev->fc_request_control & FLOW_CTRL_TX)
1839 		pause->tx_pause = 1;
1840 
1841 	if (dev->fc_request_control & FLOW_CTRL_RX)
1842 		pause->rx_pause = 1;
1843 }
1844 
1845 static int lan78xx_set_pause(struct net_device *net,
1846 			     struct ethtool_pauseparam *pause)
1847 {
1848 	struct lan78xx_net *dev = netdev_priv(net);
1849 	struct phy_device *phydev = net->phydev;
1850 	struct ethtool_link_ksettings ecmd;
1851 	int ret;
1852 
1853 	phy_ethtool_ksettings_get(phydev, &ecmd);
1854 
1855 	if (pause->autoneg && !ecmd.base.autoneg) {
1856 		ret = -EINVAL;
1857 		goto exit;
1858 	}
1859 
1860 	dev->fc_request_control = 0;
1861 	if (pause->rx_pause)
1862 		dev->fc_request_control |= FLOW_CTRL_RX;
1863 
1864 	if (pause->tx_pause)
1865 		dev->fc_request_control |= FLOW_CTRL_TX;
1866 
1867 	if (ecmd.base.autoneg) {
1868 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1869 		u32 mii_adv;
1870 
1871 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1872 				   ecmd.link_modes.advertising);
1873 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1874 				   ecmd.link_modes.advertising);
1875 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1876 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1877 		linkmode_or(ecmd.link_modes.advertising, fc,
1878 			    ecmd.link_modes.advertising);
1879 
1880 		phy_ethtool_ksettings_set(phydev, &ecmd);
1881 	}
1882 
1883 	dev->fc_autoneg = pause->autoneg;
1884 
1885 	ret = 0;
1886 exit:
1887 	return ret;
1888 }
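
/* For reference, a minimal sketch of the flow-control advertisement
 * mapping used above: the FLOW_CTRL_RX/TX request bits become the
 * clause-22 Pause/Asym_Pause advertisement bits. This mirrors what
 * mii_advertise_flowctrl() computes; the helper name below is
 * hypothetical and not part of this driver.
 */
static inline u16 lan78xx_fc_to_mii_adv(u8 cap)
{
	u16 adv = 0;

	if (cap & FLOW_CTRL_RX)		/* Rx pause: Pause + Asym_Pause */
		adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	if (cap & FLOW_CTRL_TX)		/* Tx pause: toggle Asym_Pause */
		adv ^= ADVERTISE_PAUSE_ASYM;

	return adv;
}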
1889 
1890 static int lan78xx_get_regs_len(struct net_device *netdev)
1891 {
1892 	if (!netdev->phydev)
1893 		return (sizeof(lan78xx_regs));
1894 	else
1895 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1896 }
1897 
1898 static void
1899 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1900 		 void *buf)
1901 {
1902 	u32 *data = buf;
1903 	int i, j;
1904 	struct lan78xx_net *dev = netdev_priv(netdev);
1905 
1906 	/* Read Device/MAC registers */
1907 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1908 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1909 
1910 	if (!netdev->phydev)
1911 		return;
1912 
1913 	/* Read PHY registers */
1914 	for (j = 0; j < 32; i++, j++)
1915 		data[i] = phy_read(netdev->phydev, j);
1916 }
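
/* The dump produced above is laid out as the MAC register block
 * followed, when a PHY is attached, by its 32 clause-22 registers.
 * A sketch of the offset a consumer of the dump would use to find the
 * PHY words (hypothetical helper, for illustration only):
 */
static size_t lan78xx_regs_phy_offset(void)
{
	/* PHY words start immediately after the MAC register array */
	return ARRAY_SIZE(lan78xx_regs) * sizeof(u32);
}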
1917 
1918 static const struct ethtool_ops lan78xx_ethtool_ops = {
1919 	.get_link	= lan78xx_get_link,
1920 	.nway_reset	= phy_ethtool_nway_reset,
1921 	.get_drvinfo	= lan78xx_get_drvinfo,
1922 	.get_msglevel	= lan78xx_get_msglevel,
1923 	.set_msglevel	= lan78xx_set_msglevel,
1924 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1925 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1926 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1927 	.get_ethtool_stats = lan78xx_get_stats,
1928 	.get_sset_count = lan78xx_get_sset_count,
1929 	.get_strings	= lan78xx_get_strings,
1930 	.get_wol	= lan78xx_get_wol,
1931 	.set_wol	= lan78xx_set_wol,
1932 	.get_ts_info	= ethtool_op_get_ts_info,
1933 	.get_eee	= lan78xx_get_eee,
1934 	.set_eee	= lan78xx_set_eee,
1935 	.get_pauseparam	= lan78xx_get_pause,
1936 	.set_pauseparam	= lan78xx_set_pause,
1937 	.get_link_ksettings = lan78xx_get_link_ksettings,
1938 	.set_link_ksettings = lan78xx_set_link_ksettings,
1939 	.get_regs_len	= lan78xx_get_regs_len,
1940 	.get_regs	= lan78xx_get_regs,
1941 };
1942 
1943 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1944 {
1945 	u32 addr_lo, addr_hi;
1946 	u8 addr[6];
1947 
1948 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1949 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1950 
1951 	addr[0] = addr_lo & 0xFF;
1952 	addr[1] = (addr_lo >> 8) & 0xFF;
1953 	addr[2] = (addr_lo >> 16) & 0xFF;
1954 	addr[3] = (addr_lo >> 24) & 0xFF;
1955 	addr[4] = addr_hi & 0xFF;
1956 	addr[5] = (addr_hi >> 8) & 0xFF;
1957 
1958 	if (!is_valid_ether_addr(addr)) {
1959 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1960 			/* valid address present in Device Tree */
1961 			netif_dbg(dev, ifup, dev->net,
1962 				  "MAC address read from Device Tree");
1963 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1964 						 ETH_ALEN, addr) == 0) ||
1965 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1966 					      ETH_ALEN, addr) == 0)) &&
1967 			   is_valid_ether_addr(addr)) {
1968 			/* eeprom values are valid so use them */
1969 			netif_dbg(dev, ifup, dev->net,
1970 				  "MAC address read from EEPROM");
1971 		} else {
1972 			/* generate random MAC */
1973 			eth_random_addr(addr);
1974 			netif_dbg(dev, ifup, dev->net,
1975 				  "MAC address set to random addr");
1976 		}
1977 
1978 		addr_lo = addr[0] | (addr[1] << 8) |
1979 			  (addr[2] << 16) | (addr[3] << 24);
1980 		addr_hi = addr[4] | (addr[5] << 8);
1981 
1982 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1983 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1984 	}
1985 
1986 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1987 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1988 
1989 	eth_hw_addr_set(dev->net, addr);
1990 }
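
/* The RX_ADDRL/RX_ADDRH packing above is little-endian: octets 0..3 in
 * ADDRL, octets 4..5 in the low half of ADDRH. A compact sketch of the
 * same packing (lan78xx_mac_to_regs() is a hypothetical helper, not
 * part of this driver):
 */
static void lan78xx_mac_to_regs(const u8 addr[ETH_ALEN],
				u32 *addr_lo, u32 *addr_hi)
{
	*addr_lo = get_unaligned_le32(addr);		/* octets 0..3 */
	*addr_hi = get_unaligned_le16(addr + 4);	/* octets 4..5 */
}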
1991 
1992 /* MDIO read and write wrappers for phylib */
1993 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1994 {
1995 	struct lan78xx_net *dev = bus->priv;
1996 	u32 val, addr;
1997 	int ret;
1998 
1999 	ret = usb_autopm_get_interface(dev->intf);
2000 	if (ret < 0)
2001 		return ret;
2002 
2003 	mutex_lock(&dev->phy_mutex);
2004 
2005 	/* confirm MII not busy */
2006 	ret = lan78xx_phy_wait_not_busy(dev);
2007 	if (ret < 0)
2008 		goto done;
2009 
2010 	/* set the address, index & direction (read from PHY) */
2011 	addr = mii_access(phy_id, idx, MII_READ);
2012 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2013 
2014 	ret = lan78xx_phy_wait_not_busy(dev);
2015 	if (ret < 0)
2016 		goto done;
2017 
2018 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
2019 
2020 	ret = (int)(val & 0xFFFF);
2021 
2022 done:
2023 	mutex_unlock(&dev->phy_mutex);
2024 	usb_autopm_put_interface(dev->intf);
2025 
2026 	return ret;
2027 }
2028 
2029 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2030 				 u16 regval)
2031 {
2032 	struct lan78xx_net *dev = bus->priv;
2033 	u32 val, addr;
2034 	int ret;
2035 
2036 	ret = usb_autopm_get_interface(dev->intf);
2037 	if (ret < 0)
2038 		return ret;
2039 
2040 	mutex_lock(&dev->phy_mutex);
2041 
2042 	/* confirm MII not busy */
2043 	ret = lan78xx_phy_wait_not_busy(dev);
2044 	if (ret < 0)
2045 		goto done;
2046 
2047 	val = (u32)regval;
2048 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2049 
2050 	/* set the address, index & direction (write to PHY) */
2051 	addr = mii_access(phy_id, idx, MII_WRITE);
2052 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2053 
2054 	ret = lan78xx_phy_wait_not_busy(dev);
2055 	if (ret < 0)
2056 		goto done;
2057 
2058 done:
2059 	mutex_unlock(&dev->phy_mutex);
2060 	usb_autopm_put_interface(dev->intf);
2061 	return 0;
2062 }
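
/* Together the two accessors above implement a standard phylib MDIO
 * bus. A clause-22 read-modify-write through that bus then reduces to
 * the usual pattern (a sketch; lan78xx_mdio_rmw() is hypothetical and
 * not part of this driver):
 */
static int lan78xx_mdio_rmw(struct mii_bus *bus, int phy_id, int reg,
			    u16 mask, u16 set)
{
	int val = mdiobus_read(bus, phy_id, reg);

	if (val < 0)
		return val;	/* propagate bus/autopm errors */

	return mdiobus_write(bus, phy_id, reg, ((u16)val & ~mask) | set);
}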
2063 
2064 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2065 {
2066 	struct device_node *node;
2067 	int ret;
2068 
2069 	dev->mdiobus = mdiobus_alloc();
2070 	if (!dev->mdiobus) {
2071 		netdev_err(dev->net, "can't allocate MDIO bus\n");
2072 		return -ENOMEM;
2073 	}
2074 
2075 	dev->mdiobus->priv = (void *)dev;
2076 	dev->mdiobus->read = lan78xx_mdiobus_read;
2077 	dev->mdiobus->write = lan78xx_mdiobus_write;
2078 	dev->mdiobus->name = "lan78xx-mdiobus";
2079 	dev->mdiobus->parent = &dev->udev->dev;
2080 
2081 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2082 		 dev->udev->bus->busnum, dev->udev->devnum);
2083 
2084 	switch (dev->chipid) {
2085 	case ID_REV_CHIP_ID_7800_:
2086 	case ID_REV_CHIP_ID_7850_:
2087 		/* set to internal PHY id */
2088 		dev->mdiobus->phy_mask = ~(1 << 1);
2089 		break;
2090 	case ID_REV_CHIP_ID_7801_:
2091 		/* scan through PHYAD[2..0] */
2092 		dev->mdiobus->phy_mask = ~(0xFF);
2093 		break;
2094 	}
2095 
2096 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2097 	ret = of_mdiobus_register(dev->mdiobus, node);
2098 	of_node_put(node);
2099 	if (ret) {
2100 		netdev_err(dev->net, "can't register MDIO bus\n");
2101 		goto exit1;
2102 	}
2103 
2104 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2105 	return 0;
2106 exit1:
2107 	mdiobus_free(dev->mdiobus);
2108 	return ret;
2109 }
2110 
2111 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2112 {
2113 	mdiobus_unregister(dev->mdiobus);
2114 	mdiobus_free(dev->mdiobus);
2115 }
2116 
2117 static void lan78xx_link_status_change(struct net_device *net)
2118 {
2119 	struct phy_device *phydev = net->phydev;
2120 
2121 	phy_print_status(phydev);
2122 }
2123 
2124 static int irq_map(struct irq_domain *d, unsigned int irq,
2125 		   irq_hw_number_t hwirq)
2126 {
2127 	struct irq_domain_data *data = d->host_data;
2128 
2129 	irq_set_chip_data(irq, data);
2130 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2131 	irq_set_noprobe(irq);
2132 
2133 	return 0;
2134 }
2135 
2136 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2137 {
2138 	irq_set_chip_and_handler(irq, NULL, NULL);
2139 	irq_set_chip_data(irq, NULL);
2140 }
2141 
2142 static const struct irq_domain_ops chip_domain_ops = {
2143 	.map	= irq_map,
2144 	.unmap	= irq_unmap,
2145 };
2146 
2147 static void lan78xx_irq_mask(struct irq_data *irqd)
2148 {
2149 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2150 
2151 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2152 }
2153 
2154 static void lan78xx_irq_unmask(struct irq_data *irqd)
2155 {
2156 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2157 
2158 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
2159 }
2160 
2161 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2162 {
2163 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2164 
2165 	mutex_lock(&data->irq_lock);
2166 }
2167 
2168 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2169 {
2170 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2171 	struct lan78xx_net *dev =
2172 			container_of(data, struct lan78xx_net, domain_data);
2173 	u32 buf;
2174 
2175 	/* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
2176 	 * are the only two callbacks executed in a non-atomic context.
2177 	 */
2178 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2179 	if (buf != data->irqenable)
2180 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2181 
2182 	mutex_unlock(&data->irq_lock);
2183 }
2184 
2185 static struct irq_chip lan78xx_irqchip = {
2186 	.name			= "lan78xx-irqs",
2187 	.irq_mask		= lan78xx_irq_mask,
2188 	.irq_unmask		= lan78xx_irq_unmask,
2189 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2190 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2191 };
2192 
2193 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2194 {
2195 	struct device_node *of_node;
2196 	struct irq_domain *irqdomain;
2197 	unsigned int irqmap = 0;
2198 	u32 buf;
2199 	int ret = 0;
2200 
2201 	of_node = dev->udev->dev.parent->of_node;
2202 
2203 	mutex_init(&dev->domain_data.irq_lock);
2204 
2205 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2206 	dev->domain_data.irqenable = buf;
2207 
2208 	dev->domain_data.irqchip = &lan78xx_irqchip;
2209 	dev->domain_data.irq_handler = handle_simple_irq;
2210 
2211 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2212 					  &chip_domain_ops, &dev->domain_data);
2213 	if (irqdomain) {
2214 		/* create mapping for PHY interrupt */
2215 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2216 		if (!irqmap) {
2217 			irq_domain_remove(irqdomain);
2218 
2219 			irqdomain = NULL;
2220 			ret = -EINVAL;
2221 		}
2222 	} else {
2223 		ret = -EINVAL;
2224 	}
2225 
2226 	dev->domain_data.irqdomain = irqdomain;
2227 	dev->domain_data.phyirq = irqmap;
2228 
2229 	return ret;
2230 }
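
/* A consumer resolves a hwirq in this domain to a Linux IRQ with
 * irq_create_mapping() and then requests it; because the irqchip
 * callbacks above touch USB registers, the handler must be threaded.
 * A sketch under those assumptions (the function and devname below are
 * hypothetical, not part of this driver):
 */
static int lan78xx_request_domain_irq(struct irq_domain *domain,
				      irq_hw_number_t hwirq,
				      irq_handler_t thread_fn, void *ctx)
{
	unsigned int virq = irq_create_mapping(domain, hwirq);

	if (!virq)
		return -EINVAL;

	/* no hard-irq handler; all work happens in the thread */
	return request_threaded_irq(virq, NULL, thread_fn, IRQF_ONESHOT,
				    "lan78xx-example", ctx);
}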
2231 
2232 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2233 {
2234 	if (dev->domain_data.phyirq > 0) {
2235 		irq_dispose_mapping(dev->domain_data.phyirq);
2236 
2237 		if (dev->domain_data.irqdomain)
2238 			irq_domain_remove(dev->domain_data.irqdomain);
2239 	}
2240 	dev->domain_data.phyirq = 0;
2241 	dev->domain_data.irqdomain = NULL;
2242 }
2243 
2244 static int lan8835_fixup(struct phy_device *phydev)
2245 {
2246 	int buf;
2247 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2248 
2249 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2250 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2251 	buf &= ~0x1800;
2252 	buf |= 0x0800;
2253 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2254 
2255 	/* RGMII MAC TXC Delay Enable */
2256 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2257 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2258 
2259 	/* RGMII TX DLL Tune Adjust */
2260 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2261 
2262 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2263 
2264 	return 1;
2265 }
2266 
2267 static int ksz9031rnx_fixup(struct phy_device *phydev)
2268 {
2269 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2270 
2271 	/* Micrel KSZ9031RNX PHY configuration */
2272 	/* RGMII Control Signal Pad Skew */
2273 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2274 	/* RGMII RX Data Pad Skew */
2275 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2276 	/* RGMII RX Clock Pad Skew */
2277 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2278 
2279 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2280 
2281 	return 1;
2282 }
2283 
2284 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2285 {
2286 	u32 buf;
2287 	int ret;
2288 	struct fixed_phy_status fphy_status = {
2289 		.link = 1,
2290 		.speed = SPEED_1000,
2291 		.duplex = DUPLEX_FULL,
2292 	};
2293 	struct phy_device *phydev;
2294 
2295 	phydev = phy_find_first(dev->mdiobus);
2296 	if (!phydev) {
2297 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2298 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2299 		if (IS_ERR(phydev)) {
2300 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2301 			return NULL;
2302 		}
2303 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2304 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2305 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2306 					MAC_RGMII_ID_TXC_DELAY_EN_);
2307 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2308 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2309 		buf |= HW_CFG_CLK125_EN_;
2310 		buf |= HW_CFG_REFCLK25_EN_;
2311 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2312 	} else {
2313 		if (!phydev->drv) {
2314 			netdev_err(dev->net, "no PHY driver found\n");
2315 			return NULL;
2316 		}
2317 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2318 		/* external PHY fixup for KSZ9031RNX */
2319 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2320 						 ksz9031rnx_fixup);
2321 		if (ret < 0) {
2322 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2323 			return NULL;
2324 		}
2325 		/* external PHY fixup for LAN8835 */
2326 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2327 						 lan8835_fixup);
2328 		if (ret < 0) {
2329 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2330 			return NULL;
2331 		}
2332 		/* add more external PHY fixup here if needed */
2333 
2334 		phydev->is_internal = false;
2335 	}
2336 	return phydev;
2337 }
2338 
2339 static int lan78xx_phy_init(struct lan78xx_net *dev)
2340 {
2341 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2342 	int ret;
2343 	u32 mii_adv;
2344 	struct phy_device *phydev;
2345 
2346 	switch (dev->chipid) {
2347 	case ID_REV_CHIP_ID_7801_:
2348 		phydev = lan7801_phy_init(dev);
2349 		if (!phydev) {
2350 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2351 			return -EIO;
2352 		}
2353 		break;
2354 
2355 	case ID_REV_CHIP_ID_7800_:
2356 	case ID_REV_CHIP_ID_7850_:
2357 		phydev = phy_find_first(dev->mdiobus);
2358 		if (!phydev) {
2359 			netdev_err(dev->net, "no PHY found\n");
2360 			return -EIO;
2361 		}
2362 		phydev->is_internal = true;
2363 		dev->interface = PHY_INTERFACE_MODE_GMII;
2364 		break;
2365 
2366 	default:
2367 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2368 		return -EIO;
2369 	}
2370 
2371 	/* if phyirq is not set, use polling mode in phylib */
2372 	if (dev->domain_data.phyirq > 0)
2373 		phydev->irq = dev->domain_data.phyirq;
2374 	else
2375 		phydev->irq = PHY_POLL;
2376 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2377 
2378 	/* set to AUTOMDIX */
2379 	phydev->mdix = ETH_TP_MDI_AUTO;
2380 
2381 	ret = phy_connect_direct(dev->net, phydev,
2382 				 lan78xx_link_status_change,
2383 				 dev->interface);
2384 	if (ret) {
2385 		netdev_err(dev->net, "can't attach PHY to %s\n",
2386 			   dev->mdiobus->id);
2387 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2388 			if (phy_is_pseudo_fixed_link(phydev)) {
2389 				fixed_phy_unregister(phydev);
2390 				phy_device_free(phydev);
2391 			} else {
2392 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2393 							     0xfffffff0);
2394 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2395 							     0xfffffff0);
2396 			}
2397 		}
2398 		return -EIO;
2399 	}
2400 
2401 	/* MAC doesn't support 1000T Half */
2402 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2403 
2404 	/* support both flow controls */
2405 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2406 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2407 			   phydev->advertising);
2408 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2409 			   phydev->advertising);
2410 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2411 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2412 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2413 
2414 	if (phydev->mdio.dev.of_node) {
2415 		u32 reg;
2416 		int len;
2417 
2418 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2419 						      "microchip,led-modes",
2420 						      sizeof(u32));
2421 		if (len >= 0) {
2422 			/* Ensure the appropriate LEDs are enabled */
2423 			lan78xx_read_reg(dev, HW_CFG, &reg);
2424 			reg &= ~(HW_CFG_LED0_EN_ |
2425 				 HW_CFG_LED1_EN_ |
2426 				 HW_CFG_LED2_EN_ |
2427 				 HW_CFG_LED3_EN_);
2428 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2429 				(len > 1) * HW_CFG_LED1_EN_ |
2430 				(len > 2) * HW_CFG_LED2_EN_ |
2431 				(len > 3) * HW_CFG_LED3_EN_;
2432 			lan78xx_write_reg(dev, HW_CFG, reg);
2433 		}
2434 	}
2435 
2436 	genphy_config_aneg(phydev);
2437 
2438 	dev->fc_autoneg = phydev->autoneg;
2439 
2440 	return 0;
2441 }
2442 
2443 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2444 {
2445 	u32 buf;
2446 	bool rxenabled;
2447 
2448 	lan78xx_read_reg(dev, MAC_RX, &buf);
2449 
2450 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2451 
2452 	if (rxenabled) {
2453 		buf &= ~MAC_RX_RXEN_;
2454 		lan78xx_write_reg(dev, MAC_RX, buf);
2455 	}
2456 
2457 	/* add 4 to size for FCS */
2458 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2459 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2460 
2461 	lan78xx_write_reg(dev, MAC_RX, buf);
2462 
2463 	if (rxenabled) {
2464 		buf |= MAC_RX_RXEN_;
2465 		lan78xx_write_reg(dev, MAC_RX, buf);
2466 	}
2467 
2468 	return 0;
2469 }
2470 
2471 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2472 {
2473 	struct sk_buff *skb;
2474 	unsigned long flags;
2475 	int count = 0;
2476 
2477 	spin_lock_irqsave(&q->lock, flags);
2478 	while (!skb_queue_empty(q)) {
2479 		struct skb_data	*entry;
2480 		struct urb *urb;
2481 		int ret;
2482 
2483 		skb_queue_walk(q, skb) {
2484 			entry = (struct skb_data *)skb->cb;
2485 			if (entry->state != unlink_start)
2486 				goto found;
2487 		}
2488 		break;
2489 found:
2490 		entry->state = unlink_start;
2491 		urb = entry->urb;
2492 
2493 		/* Take a reference on the URB so it cannot be freed
2494 		 * during usb_unlink_urb(), which could otherwise trigger
2495 		 * a use-after-free inside usb_unlink_urb() since it is
2496 		 * always racing with the .complete handler (including
2497 		 * defer_bh).
2498 		 */
2499 		usb_get_urb(urb);
2500 		spin_unlock_irqrestore(&q->lock, flags);
2501 		/* during some PM-driven resume scenarios,
2502 		 * these (async) unlinks complete immediately
2503 		 */
2504 		ret = usb_unlink_urb(urb);
2505 		if (ret != -EINPROGRESS && ret != 0)
2506 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2507 		else
2508 			count++;
2509 		usb_put_urb(urb);
2510 		spin_lock_irqsave(&q->lock, flags);
2511 	}
2512 	spin_unlock_irqrestore(&q->lock, flags);
2513 	return count;
2514 }
2515 
2516 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2517 {
2518 	struct lan78xx_net *dev = netdev_priv(netdev);
2519 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2520 	int ret;
2521 
2522 	/* no second zero-length packet read wanted after mtu-sized packets */
2523 	if ((max_frame_len % dev->maxpacket) == 0)
2524 		return -EDOM;
2525 
2526 	ret = usb_autopm_get_interface(dev->intf);
2527 	if (ret < 0)
2528 		return ret;
2529 
2530 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2531 	if (!ret)
2532 		netdev->mtu = new_mtu;
2533 
2534 	usb_autopm_put_interface(dev->intf);
2535 
2536 	return ret;
2537 }
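
/* The -EDOM check above rejects MTUs whose maximum frame length is an
 * exact multiple of the endpoint's wMaxPacketSize, since such bulk-in
 * transfers would be terminated by an extra zero-length packet. The
 * same test in isolation (hypothetical helper, assuming maxpacket
 * holds wMaxPacketSize):
 */
static bool lan78xx_frame_len_needs_zlp(u32 frame_len, u32 maxpacket)
{
	return (frame_len % maxpacket) == 0;
}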
2538 
2539 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2540 {
2541 	struct lan78xx_net *dev = netdev_priv(netdev);
2542 	struct sockaddr *addr = p;
2543 	u32 addr_lo, addr_hi;
2544 
2545 	if (netif_running(netdev))
2546 		return -EBUSY;
2547 
2548 	if (!is_valid_ether_addr(addr->sa_data))
2549 		return -EADDRNOTAVAIL;
2550 
2551 	eth_hw_addr_set(netdev, addr->sa_data);
2552 
2553 	addr_lo = netdev->dev_addr[0] |
2554 		  netdev->dev_addr[1] << 8 |
2555 		  netdev->dev_addr[2] << 16 |
2556 		  netdev->dev_addr[3] << 24;
2557 	addr_hi = netdev->dev_addr[4] |
2558 		  netdev->dev_addr[5] << 8;
2559 
2560 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2561 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2562 
2563 	/* Added to support MAC address changes */
2564 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2565 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2566 
2567 	return 0;
2568 }
2569 
2570 /* Enable or disable Rx checksum offload engine */
2571 static int lan78xx_set_features(struct net_device *netdev,
2572 				netdev_features_t features)
2573 {
2574 	struct lan78xx_net *dev = netdev_priv(netdev);
2575 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2576 	unsigned long flags;
2577 
2578 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2579 
2580 	if (features & NETIF_F_RXCSUM) {
2581 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2582 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2583 	} else {
2584 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2585 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2586 	}
2587 
2588 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2589 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2590 	else
2591 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2592 
2593 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2594 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2595 	else
2596 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2597 
2598 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2599 
2600 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2601 
2602 	return 0;
2603 }
2604 
2605 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2606 {
2607 	struct lan78xx_priv *pdata =
2608 			container_of(param, struct lan78xx_priv, set_vlan);
2609 	struct lan78xx_net *dev = pdata->dev;
2610 
2611 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2612 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2613 }
2614 
2615 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2616 				   __be16 proto, u16 vid)
2617 {
2618 	struct lan78xx_net *dev = netdev_priv(netdev);
2619 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2620 	u16 vid_bit_index;
2621 	u16 vid_dword_index;
2622 
2623 	vid_dword_index = (vid >> 5) & 0x7F;
2624 	vid_bit_index = vid & 0x1F;
2625 
2626 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2627 
2628 	/* defer register writes to a sleepable context */
2629 	schedule_work(&pdata->set_vlan);
2630 
2631 	return 0;
2632 }
2633 
2634 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2635 				    __be16 proto, u16 vid)
2636 {
2637 	struct lan78xx_net *dev = netdev_priv(netdev);
2638 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2639 	u16 vid_bit_index;
2640 	u16 vid_dword_index;
2641 
2642 	vid_dword_index = (vid >> 5) & 0x7F;
2643 	vid_bit_index = vid & 0x1F;
2644 
2645 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2646 
2647 	/* defer register writes to a sleepable context */
2648 	schedule_work(&pdata->set_vlan);
2649 
2650 	return 0;
2651 }
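
/* Both VLAN paths above index a 4096-bit filter bitmap as 128 dwords:
 * dword = VID[11:5], bit = VID[4:0]. A minimal sketch of the lookup
 * side of that mapping (hypothetical helper, illustration only):
 */
static bool lan78xx_vlan_test_vid(const u32 *vlan_table, u16 vid)
{
	return vlan_table[(vid >> 5) & 0x7F] & (1 << (vid & 0x1F));
}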
2652 
2653 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2654 {
2655 	int ret;
2656 	u32 buf;
2657 	u32 regs[6] = { 0 };
2658 
2659 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2660 	if (buf & USB_CFG1_LTM_ENABLE_) {
2661 		u8 temp[2];
2662 		/* Get values from EEPROM first */
2663 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2664 			if (temp[0] == 24) {
2665 				ret = lan78xx_read_raw_eeprom(dev,
2666 							      temp[1] * 2,
2667 							      24,
2668 							      (u8 *)regs);
2669 				if (ret < 0)
2670 					return;
2671 			}
2672 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2673 			if (temp[0] == 24) {
2674 				ret = lan78xx_read_raw_otp(dev,
2675 							   temp[1] * 2,
2676 							   24,
2677 							   (u8 *)regs);
2678 				if (ret < 0)
2679 					return;
2680 			}
2681 		}
2682 	}
2683 
2684 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2685 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2686 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2687 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2688 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2689 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2690 }
2691 
2692 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2693 {
2694 	int result = 0;
2695 
2696 	switch (dev->udev->speed) {
2697 	case USB_SPEED_SUPER:
2698 		dev->rx_urb_size = RX_SS_URB_SIZE;
2699 		dev->tx_urb_size = TX_SS_URB_SIZE;
2700 		dev->n_rx_urbs = RX_SS_URB_NUM;
2701 		dev->n_tx_urbs = TX_SS_URB_NUM;
2702 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
2703 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2704 		break;
2705 	case USB_SPEED_HIGH:
2706 		dev->rx_urb_size = RX_HS_URB_SIZE;
2707 		dev->tx_urb_size = TX_HS_URB_SIZE;
2708 		dev->n_rx_urbs = RX_HS_URB_NUM;
2709 		dev->n_tx_urbs = TX_HS_URB_NUM;
2710 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
2711 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2712 		break;
2713 	case USB_SPEED_FULL:
2714 		dev->rx_urb_size = RX_FS_URB_SIZE;
2715 		dev->tx_urb_size = TX_FS_URB_SIZE;
2716 		dev->n_rx_urbs = RX_FS_URB_NUM;
2717 		dev->n_tx_urbs = TX_FS_URB_NUM;
2718 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
2719 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2720 		break;
2721 	default:
2722 		netdev_warn(dev->net, "USB bus speed not supported\n");
2723 		result = -EIO;
2724 		break;
2725 	}
2726 
2727 	return result;
2728 }
2729 
2730 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2731 {
2732 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2733 }
2734 
2735 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2736 			   u32 hw_disabled)
2737 {
2738 	unsigned long timeout;
2739 	bool stopped = true;
2740 	int ret;
2741 	u32 buf;
2742 
2743 	/* Stop the h/w block (if not already stopped) */
2744 
2745 	ret = lan78xx_read_reg(dev, reg, &buf);
2746 	if (ret < 0)
2747 		return ret;
2748 
2749 	if (buf & hw_enabled) {
2750 		buf &= ~hw_enabled;
2751 
2752 		ret = lan78xx_write_reg(dev, reg, buf);
2753 		if (ret < 0)
2754 			return ret;
2755 
2756 		stopped = false;
2757 		timeout = jiffies + HW_DISABLE_TIMEOUT;
2758 		do {
2759 			ret = lan78xx_read_reg(dev, reg, &buf);
2760 			if (ret < 0)
2761 				return ret;
2762 
2763 			if (buf & hw_disabled)
2764 				stopped = true;
2765 			else
2766 				msleep(HW_DISABLE_DELAY_MS);
2767 		} while (!stopped && !time_after(jiffies, timeout));
2768 	}
2769 
2770 	ret = stopped ? 0 : -ETIME;
2771 
2772 	return ret;
2773 }
2774 
2775 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2776 {
2777 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2778 }
2779 
2780 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2781 {
2782 	int ret;
2783 
2784 	netif_dbg(dev, drv, dev->net, "start tx path");
2785 
2786 	/* Start the MAC transmitter */
2787 
2788 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2789 	if (ret < 0)
2790 		return ret;
2791 
2792 	/* Start the Tx FIFO */
2793 
2794 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2795 	if (ret < 0)
2796 		return ret;
2797 
2798 	return 0;
2799 }
2800 
2801 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2802 {
2803 	int ret;
2804 
2805 	netif_dbg(dev, drv, dev->net, "stop tx path");
2806 
2807 	/* Stop the Tx FIFO */
2808 
2809 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2810 	if (ret < 0)
2811 		return ret;
2812 
2813 	/* Stop the MAC transmitter */
2814 
2815 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2816 	if (ret < 0)
2817 		return ret;
2818 
2819 	return 0;
2820 }
2821 
2822 /* The caller must ensure the Tx path is stopped before calling
2823  * lan78xx_flush_tx_fifo().
2824  */
2825 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2826 {
2827 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2828 }
2829 
2830 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2831 {
2832 	int ret;
2833 
2834 	netif_dbg(dev, drv, dev->net, "start rx path");
2835 
2836 	/* Start the Rx FIFO */
2837 
2838 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2839 	if (ret < 0)
2840 		return ret;
2841 
2842 	/* Start the MAC receiver */
2843 
2844 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2845 	if (ret < 0)
2846 		return ret;
2847 
2848 	return 0;
2849 }
2850 
2851 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2852 {
2853 	int ret;
2854 
2855 	netif_dbg(dev, drv, dev->net, "stop rx path");
2856 
2857 	/* Stop the MAC receiver */
2858 
2859 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2860 	if (ret < 0)
2861 		return ret;
2862 
2863 	/* Stop the Rx FIFO */
2864 
2865 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2866 	if (ret < 0)
2867 		return ret;
2868 
2869 	return 0;
2870 }
2871 
2872 /* The caller must ensure the Rx path is stopped before calling
2873  * lan78xx_flush_rx_fifo().
2874  */
2875 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2876 {
2877 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2878 }
2879 
2880 static int lan78xx_reset(struct lan78xx_net *dev)
2881 {
2882 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2883 	unsigned long timeout;
2884 	int ret;
2885 	u32 buf;
2886 	u8 sig;
2887 
2888 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2889 	if (ret < 0)
2890 		return ret;
2891 
2892 	buf |= HW_CFG_LRST_;
2893 
2894 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2895 	if (ret < 0)
2896 		return ret;
2897 
2898 	timeout = jiffies + HZ;
2899 	do {
2900 		mdelay(1);
2901 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2902 		if (ret < 0)
2903 			return ret;
2904 
2905 		if (time_after(jiffies, timeout)) {
2906 			netdev_warn(dev->net,
2907 				    "timeout on completion of LiteReset");
2908 			ret = -ETIMEDOUT;
2909 			return ret;
2910 		}
2911 	} while (buf & HW_CFG_LRST_);
2912 
2913 	lan78xx_init_mac_address(dev);
2914 
2915 	/* save DEVID for later usage */
2916 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2917 	if (ret < 0)
2918 		return ret;
2919 
2920 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2921 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2922 
2923 	/* Respond to the IN token with a NAK */
2924 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2925 	if (ret < 0)
2926 		return ret;
2927 
2928 	buf |= USB_CFG_BIR_;
2929 
2930 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2931 	if (ret < 0)
2932 		return ret;
2933 
2934 	/* Init LTM */
2935 	lan78xx_init_ltm(dev);
2936 
2937 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
2938 	if (ret < 0)
2939 		return ret;
2940 
2941 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
2942 	if (ret < 0)
2943 		return ret;
2944 
2945 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2946 	if (ret < 0)
2947 		return ret;
2948 
2949 	buf |= HW_CFG_MEF_;
2950 
2951 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2952 	if (ret < 0)
2953 		return ret;
2954 
2955 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2956 	if (ret < 0)
2957 		return ret;
2958 
2959 	buf |= USB_CFG_BCE_;
2960 
2961 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2962 	if (ret < 0)
2963 		return ret;
2964 
2965 	/* set FIFO sizes */
2966 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2967 
2968 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2969 	if (ret < 0)
2970 		return ret;
2971 
2972 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2973 
2974 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2975 	if (ret < 0)
2976 		return ret;
2977 
2978 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2979 	if (ret < 0)
2980 		return ret;
2981 
2982 	ret = lan78xx_write_reg(dev, FLOW, 0);
2983 	if (ret < 0)
2984 		return ret;
2985 
2986 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2987 	if (ret < 0)
2988 		return ret;
2989 
2990 	/* Don't need rfe_ctl_lock during initialisation */
2991 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2992 	if (ret < 0)
2993 		return ret;
2994 
2995 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2996 
2997 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2998 	if (ret < 0)
2999 		return ret;
3000 
3001 	/* Enable or disable checksum offload engines */
3002 	ret = lan78xx_set_features(dev->net, dev->net->features);
3003 	if (ret < 0)
3004 		return ret;
3005 
3006 	lan78xx_set_multicast(dev->net);
3007 
3008 	/* reset PHY */
3009 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3010 	if (ret < 0)
3011 		return ret;
3012 
3013 	buf |= PMT_CTL_PHY_RST_;
3014 
3015 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3016 	if (ret < 0)
3017 		return ret;
3018 
3019 	timeout = jiffies + HZ;
3020 	do {
3021 		mdelay(1);
3022 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3023 		if (ret < 0)
3024 			return ret;
3025 
3026 		if (time_after(jiffies, timeout)) {
3027 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3028 			ret = -ETIMEDOUT;
3029 			return ret;
3030 		}
3031 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3032 
3033 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3034 	if (ret < 0)
3035 		return ret;
3036 
3037 	/* LAN7801 only has RGMII mode */
3038 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
3039 		buf &= ~MAC_CR_GMII_EN_;
3040 
3041 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3042 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3043 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3044 		if (!ret && sig != EEPROM_INDICATOR) {
3045 			/* Implies there is no external eeprom. Set mac speed */
3046 			/* Implies there is no external EEPROM. Set MAC speed */
3047 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3048 		}
3049 	}
3050 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3051 	if (ret < 0)
3052 		return ret;
3053 
3054 	ret = lan78xx_set_rx_max_frame_length(dev,
3055 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3056 
3057 	return ret;
3058 }
3059 
3060 static void lan78xx_init_stats(struct lan78xx_net *dev)
3061 {
3062 	u32 *p;
3063 	int i;
3064 
3065 	/* initialize for stats update
3066 	 * some counters are 20 bits and some are 32 bits
3067 	 */
3068 	p = (u32 *)&dev->stats.rollover_max;
3069 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3070 		p[i] = 0xFFFFF;
3071 
3072 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3073 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3074 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3075 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3076 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3077 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3078 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3079 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3080 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3081 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3082 
3083 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3084 }
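
/* With the per-counter widths recorded above, a wrapped hardware
 * counter can be folded into a monotonic software total. A sketch of
 * the wrap-aware delta (hypothetical helper; the real accumulation
 * happens in the deferred stats work elsewhere in this file):
 */
static u64 lan78xx_counter_delta(u32 prev, u32 now, u32 rollover_max)
{
	if (now >= prev)
		return now - prev;

	/* counter wrapped: the span of the counter is rollover_max + 1 */
	return (u64)rollover_max + 1 - prev + now;
}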
3085 
3086 static int lan78xx_open(struct net_device *net)
3087 {
3088 	struct lan78xx_net *dev = netdev_priv(net);
3089 	int ret;
3090 
3091 	netif_dbg(dev, ifup, dev->net, "open device");
3092 
3093 	ret = usb_autopm_get_interface(dev->intf);
3094 	if (ret < 0)
3095 		return ret;
3096 
3097 	mutex_lock(&dev->dev_mutex);
3098 
3099 	phy_start(net->phydev);
3100 
3101 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
3102 
3103 	/* for Link Check */
3104 	if (dev->urb_intr) {
3105 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3106 		if (ret < 0) {
3107 			netif_err(dev, ifup, dev->net,
3108 				  "intr submit %d\n", ret);
3109 			goto done;
3110 		}
3111 	}
3112 
3113 	ret = lan78xx_flush_rx_fifo(dev);
3114 	if (ret < 0)
3115 		goto done;
3116 	ret = lan78xx_flush_tx_fifo(dev);
3117 	if (ret < 0)
3118 		goto done;
3119 
3120 	ret = lan78xx_start_tx_path(dev);
3121 	if (ret < 0)
3122 		goto done;
3123 	ret = lan78xx_start_rx_path(dev);
3124 	if (ret < 0)
3125 		goto done;
3126 
3127 	lan78xx_init_stats(dev);
3128 
3129 	set_bit(EVENT_DEV_OPEN, &dev->flags);
3130 
3131 	netif_start_queue(net);
3132 
3133 	dev->link_on = false;
3134 
3135 	napi_enable(&dev->napi);
3136 
3137 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3138 done:
3139 	mutex_unlock(&dev->dev_mutex);
3140 
3141 	if (ret < 0)
3142 		usb_autopm_put_interface(dev->intf);
3143 
3144 	return ret;
3145 }
3146 
3147 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3148 {
3149 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3150 	DECLARE_WAITQUEUE(wait, current);
3151 	int temp;
3152 
3153 	/* ensure there are no more active urbs */
3154 	add_wait_queue(&unlink_wakeup, &wait);
3155 	set_current_state(TASK_UNINTERRUPTIBLE);
3156 	dev->wait = &unlink_wakeup;
3157 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3158 
3159 	/* maybe wait for deletions to finish. */
3160 	while (!skb_queue_empty(&dev->rxq) ||
3161 	       !skb_queue_empty(&dev->txq)) {
3162 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3163 		set_current_state(TASK_UNINTERRUPTIBLE);
3164 		netif_dbg(dev, ifdown, dev->net,
3165 			  "waited for %d urb completions", temp);
3166 	}
3167 	set_current_state(TASK_RUNNING);
3168 	dev->wait = NULL;
3169 	remove_wait_queue(&unlink_wakeup, &wait);
3170 
3171 	/* empty the Rx done, Rx overflow and Tx pend queues */
3173 	while (!skb_queue_empty(&dev->rxq_done)) {
3174 		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3175 
3176 		lan78xx_release_rx_buf(dev, skb);
3177 	}
3178 
3179 	skb_queue_purge(&dev->rxq_overflow);
3180 	skb_queue_purge(&dev->txq_pend);
3181 }
3182 
3183 static int lan78xx_stop(struct net_device *net)
3184 {
3185 	struct lan78xx_net *dev = netdev_priv(net);
3186 
3187 	netif_dbg(dev, ifup, dev->net, "stop device");
3188 
3189 	mutex_lock(&dev->dev_mutex);
3190 
3191 	if (timer_pending(&dev->stat_monitor))
3192 		del_timer_sync(&dev->stat_monitor);
3193 
3194 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3195 	netif_stop_queue(net);
3196 	napi_disable(&dev->napi);
3197 
3198 	lan78xx_terminate_urbs(dev);
3199 
3200 	netif_info(dev, ifdown, dev->net,
3201 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3202 		   net->stats.rx_packets, net->stats.tx_packets,
3203 		   net->stats.rx_errors, net->stats.tx_errors);
3204 
3205 	/* ignore errors that occur stopping the Tx and Rx data paths */
3206 	lan78xx_stop_tx_path(dev);
3207 	lan78xx_stop_rx_path(dev);
3208 
3209 	if (net->phydev)
3210 		phy_stop(net->phydev);
3211 
3212 	usb_kill_urb(dev->urb_intr);
3213 
3214 	/* Deferred work (task, timer, softirq) must also stop.
3215 	 * We can't flush_scheduled_work() until we drop rtnl (later),
3216 	 * else workers could deadlock; so make the workers a NOP.
3217 	 */
3218 	clear_bit(EVENT_TX_HALT, &dev->flags);
3219 	clear_bit(EVENT_RX_HALT, &dev->flags);
3220 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3221 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3222 
3223 	cancel_delayed_work_sync(&dev->wq);
3224 
3225 	usb_autopm_put_interface(dev->intf);
3226 
3227 	mutex_unlock(&dev->dev_mutex);
3228 
3229 	return 0;
3230 }
3231 
3232 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3233 			       struct sk_buff_head *list, enum skb_state state)
3234 {
3235 	unsigned long flags;
3236 	enum skb_state old_state;
3237 	struct skb_data *entry = (struct skb_data *)skb->cb;
3238 
3239 	spin_lock_irqsave(&list->lock, flags);
3240 	old_state = entry->state;
3241 	entry->state = state;
3242 
3243 	__skb_unlink(skb, list);
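	/* hand off from the source queue's lock to the rxq_done lock
	 * without re-enabling interrupts in between
	 */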
3244 	spin_unlock(&list->lock);
3245 	spin_lock(&dev->rxq_done.lock);
3246 
3247 	__skb_queue_tail(&dev->rxq_done, skb);
3248 	if (skb_queue_len(&dev->rxq_done) == 1)
3249 		napi_schedule(&dev->napi);
3250 
3251 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3252 
3253 	return old_state;
3254 }
3255 
3256 static void tx_complete(struct urb *urb)
3257 {
3258 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3259 	struct skb_data *entry = (struct skb_data *)skb->cb;
3260 	struct lan78xx_net *dev = entry->dev;
3261 
3262 	if (urb->status == 0) {
3263 		dev->net->stats.tx_packets += entry->num_of_packet;
3264 		dev->net->stats.tx_bytes += entry->length;
3265 	} else {
3266 		dev->net->stats.tx_errors += entry->num_of_packet;
3267 
3268 		switch (urb->status) {
3269 		case -EPIPE:
3270 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3271 			break;
3272 
3273 		/* software-driven interface shutdown */
3274 		case -ECONNRESET:
3275 		case -ESHUTDOWN:
3276 			netif_dbg(dev, tx_err, dev->net,
3277 				  "tx err interface gone %d\n",
3278 				  entry->urb->status);
3279 			break;
3280 
3281 		case -EPROTO:
3282 		case -ETIME:
3283 		case -EILSEQ:
3284 			netif_stop_queue(dev->net);
3285 			netif_dbg(dev, tx_err, dev->net,
3286 				  "tx err queue stopped %d\n",
3287 				  entry->urb->status);
3288 			break;
3289 		default:
3290 			netif_dbg(dev, tx_err, dev->net,
3291 				  "unknown tx err %d\n",
3292 				  entry->urb->status);
3293 			break;
3294 		}
3295 	}
3296 
3297 	usb_autopm_put_interface_async(dev->intf);
3298 
3299 	skb_unlink(skb, &dev->txq);
3300 
3301 	lan78xx_release_tx_buf(dev, skb);
3302 
3303 	/* Re-schedule NAPI if Tx data is pending but no URBs are in progress. */
3305 	if (skb_queue_empty(&dev->txq) &&
3306 	    !skb_queue_empty(&dev->txq_pend))
3307 		napi_schedule(&dev->napi);
3308 }
3309 
3310 static void lan78xx_queue_skb(struct sk_buff_head *list,
3311 			      struct sk_buff *newsk, enum skb_state state)
3312 {
3313 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3314 
3315 	__skb_queue_tail(list, newsk);
3316 	entry->state = state;
3317 }
3318 
3319 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3320 {
3321 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3322 }
3323 
3324 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3325 {
3326 	return dev->tx_pend_data_len;
3327 }
3328 
3329 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3330 				    struct sk_buff *skb,
3331 				    unsigned int *tx_pend_data_len)
3332 {
3333 	unsigned long flags;
3334 
3335 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3336 
3337 	__skb_queue_tail(&dev->txq_pend, skb);
3338 
3339 	dev->tx_pend_data_len += skb->len;
3340 	*tx_pend_data_len = dev->tx_pend_data_len;
3341 
3342 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3343 }
3344 
3345 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3346 					 struct sk_buff *skb,
3347 					 unsigned int *tx_pend_data_len)
3348 {
3349 	unsigned long flags;
3350 
3351 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3352 
3353 	__skb_queue_head(&dev->txq_pend, skb);
3354 
3355 	dev->tx_pend_data_len += skb->len;
3356 	*tx_pend_data_len = dev->tx_pend_data_len;
3357 
3358 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3359 }
3360 
3361 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3362 				    struct sk_buff **skb,
3363 				    unsigned int *tx_pend_data_len)
3364 {
3365 	unsigned long flags;
3366 
3367 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3368 
3369 	*skb = __skb_dequeue(&dev->txq_pend);
3370 	if (*skb)
3371 		dev->tx_pend_data_len -= (*skb)->len;
3372 	*tx_pend_data_len = dev->tx_pend_data_len;
3373 
3374 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3375 }
3376 
3377 static netdev_tx_t
3378 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3379 {
3380 	struct lan78xx_net *dev = netdev_priv(net);
3381 	unsigned int tx_pend_data_len;
3382 
3383 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3384 		schedule_delayed_work(&dev->wq, 0);
3385 
3386 	skb_tx_timestamp(skb);
3387 
3388 	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3389 
3390 	/* Set up a Tx URB if none is in progress */
3391 
3392 	if (skb_queue_empty(&dev->txq))
3393 		napi_schedule(&dev->napi);
3394 
3395 	/* Stop stack Tx queue if we have enough data to fill
3396 	 * all the free Tx URBs.
3397 	 */
3398 	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3399 		netif_stop_queue(net);
3400 
3401 		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3402 			  tx_pend_data_len, lan78xx_tx_urb_space(dev));
3403 
3404 		/* Kick off transmission of pending data */
3405 
3406 		if (!skb_queue_empty(&dev->txq_free))
3407 			napi_schedule(&dev->napi);
3408 	}
3409 
3410 	return NETDEV_TX_OK;
3411 }
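
/* The stop condition above pairs with a wake on the Tx completion side:
 * once the pending data again fits in the free URB pool, the stack
 * queue can be restarted. A sketch of that wake test (hypothetical
 * helper; the real wake-up lives in the NAPI/Tx completion code
 * elsewhere in this file):
 */
static bool lan78xx_tx_can_wake(struct lan78xx_net *dev)
{
	return netif_queue_stopped(dev->net) &&
	       lan78xx_tx_pend_data_len(dev) <= lan78xx_tx_urb_space(dev);
}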
3412 
3413 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3414 {
3415 	struct lan78xx_priv *pdata = NULL;
3416 	int ret;
3417 	int i;
3418 
3419 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3420 
3421 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3422 	if (!pdata) {
3423 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3424 		return -ENOMEM;
3425 	}
3426 
3427 	pdata->dev = dev;
3428 
3429 	spin_lock_init(&pdata->rfe_ctl_lock);
3430 	mutex_init(&pdata->dataport_mutex);
3431 
3432 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3433 
3434 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3435 		pdata->vlan_table[i] = 0;
3436 
3437 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3438 
3439 	dev->net->features = 0;
3440 
3441 	if (DEFAULT_TX_CSUM_ENABLE)
3442 		dev->net->features |= NETIF_F_HW_CSUM;
3443 
3444 	if (DEFAULT_RX_CSUM_ENABLE)
3445 		dev->net->features |= NETIF_F_RXCSUM;
3446 
3447 	if (DEFAULT_TSO_CSUM_ENABLE)
3448 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3449 
3450 	if (DEFAULT_VLAN_RX_OFFLOAD)
3451 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3452 
3453 	if (DEFAULT_VLAN_FILTER_ENABLE)
3454 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3455 
3456 	dev->net->hw_features = dev->net->features;
3457 
3458 	ret = lan78xx_setup_irq_domain(dev);
3459 	if (ret < 0) {
3460 		netdev_warn(dev->net,
3461 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3462 		goto out1;
3463 	}
3464 
3465 	/* Init all registers */
3466 	ret = lan78xx_reset(dev);
3467 	if (ret) {
3468 		netdev_warn(dev->net, "Registers INIT FAILED....");
3469 		goto out2;
3470 	}
3471 
3472 	ret = lan78xx_mdio_init(dev);
3473 	if (ret) {
3474 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3475 		goto out2;
3476 	}
3477 
3478 	dev->net->flags |= IFF_MULTICAST;
3479 
3480 	pdata->wol = WAKE_MAGIC;
3481 
3482 	return ret;
3483 
3484 out2:
3485 	lan78xx_remove_irq_domain(dev);
3486 
3487 out1:
3488 	netdev_warn(dev->net, "Bind routine FAILED");
3489 	cancel_work_sync(&pdata->set_multicast);
3490 	cancel_work_sync(&pdata->set_vlan);
3491 	kfree(pdata);
3492 	return ret;
3493 }
3494 
3495 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3496 {
3497 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3498 
3499 	lan78xx_remove_irq_domain(dev);
3500 
3501 	lan78xx_remove_mdio(dev);
3502 
3503 	if (pdata) {
3504 		cancel_work_sync(&pdata->set_multicast);
3505 		cancel_work_sync(&pdata->set_vlan);
3506 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3507 		kfree(pdata);
3508 		pdata = NULL;
3509 		dev->data[0] = 0;
3510 	}
3511 }
3512 
3513 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3514 				    struct sk_buff *skb,
3515 				    u32 rx_cmd_a, u32 rx_cmd_b)
3516 {
3517 	/* HW Checksum offload appears to be flawed if used when not stripping
3518 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3519 	 */
3520 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3521 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3522 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3523 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3524 		skb->ip_summed = CHECKSUM_NONE;
3525 	} else {
3526 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3527 		skb->ip_summed = CHECKSUM_COMPLETE;
3528 	}
3529 }
3530 
3531 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3532 				    struct sk_buff *skb,
3533 				    u32 rx_cmd_a, u32 rx_cmd_b)
3534 {
3535 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3536 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3537 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3538 				       (rx_cmd_b & 0xffff));
3539 }
3540 
3541 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3542 {
3543 	dev->net->stats.rx_packets++;
3544 	dev->net->stats.rx_bytes += skb->len;
3545 
3546 	skb->protocol = eth_type_trans(skb, dev->net);
3547 
3548 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3549 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3550 	memset(skb->cb, 0, sizeof(struct skb_data));
3551 
3552 	if (skb_defer_rx_timestamp(skb))
3553 		return;
3554 
3555 	napi_gro_receive(&dev->napi, skb);
3556 }
3557 
3558 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3559 		      int budget, int *work_done)
3560 {
3561 	if (skb->len < RX_SKB_MIN_LEN)
3562 		return 0;
3563 
3564 	/* Extract frames from the URB buffer and pass each one to
3565 	 * the stack in a new NAPI SKB.
3566 	 */
3567 	while (skb->len > 0) {
3568 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3569 		u16 rx_cmd_c;
3570 		unsigned char *packet;
3571 
3572 		rx_cmd_a = get_unaligned_le32(skb->data);
3573 		skb_pull(skb, sizeof(rx_cmd_a));
3574 
3575 		rx_cmd_b = get_unaligned_le32(skb->data);
3576 		skb_pull(skb, sizeof(rx_cmd_b));
3577 
3578 		rx_cmd_c = get_unaligned_le16(skb->data);
3579 		skb_pull(skb, sizeof(rx_cmd_c));
3580 
3581 		packet = skb->data;
3582 
3583 		/* get the packet length */
3584 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3585 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3586 
3587 		if (unlikely(size > skb->len)) {
3588 			netif_dbg(dev, rx_err, dev->net,
3589 				  "size err rx_cmd_a=0x%08x\n",
3590 				  rx_cmd_a);
3591 			return 0;
3592 		}
3593 
3594 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3595 			netif_dbg(dev, rx_err, dev->net,
3596 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3597 		} else {
3598 			u32 frame_len;
3599 			struct sk_buff *skb2;
3600 
3601 			if (unlikely(size < ETH_FCS_LEN)) {
3602 				netif_dbg(dev, rx_err, dev->net,
3603 					  "size err rx_cmd_a=0x%08x\n",
3604 					  rx_cmd_a);
3605 				return 0;
3606 			}
3607 
3608 			frame_len = size - ETH_FCS_LEN;
3609 
3610 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3611 			if (!skb2)
3612 				return 0;
3613 
3614 			memcpy(skb2->data, packet, frame_len);
3615 
3616 			skb_put(skb2, frame_len);
3617 
3618 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3619 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3620 
3621 			/* Processing of the URB buffer must complete once
3622 			 * it has started. If the NAPI work budget is exhausted
3623 			 * while frames remain they are added to the overflow
3624 			 * queue for delivery in the next NAPI polling cycle.
3625 			 */
3626 			if (*work_done < budget) {
3627 				lan78xx_skb_return(dev, skb2);
3628 				++(*work_done);
3629 			} else {
3630 				skb_queue_tail(&dev->rxq_overflow, skb2);
3631 			}
3632 		}
3633 
3634 		skb_pull(skb, size);
3635 
3636 		/* skip padding bytes before the next frame starts */
3637 		if (skb->len)
3638 			skb_pull(skb, align_count);
3639 	}
3640 
3641 	return 1;
3642 }
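
/* Frames in a URB buffer are prefixed by a 10-byte command header
 * (rx_cmd_a/b/c) and padded so that, with the RXW_PADDING bias of 2,
 * the next header lands on a 4-byte boundary. The padding math used
 * above, in isolation (hypothetical helper):
 */
static u32 lan78xx_rx_align_count(u32 size)
{
	/* pad (size + RXW_PADDING) up to the next 4-byte boundary */
	return (4 - ((size + RXW_PADDING) % 4)) % 4;
}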
3643 
3644 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3645 			      int budget, int *work_done)
3646 {
3647 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3648 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3649 		dev->net->stats.rx_errors++;
3650 	}
3651 }
3652 
3653 static void rx_complete(struct urb *urb)
3654 {
3655 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3656 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3657 	struct lan78xx_net *dev = entry->dev;
3658 	int urb_status = urb->status;
3659 	enum skb_state state;
3660 
3661 	netif_dbg(dev, rx_status, dev->net,
3662 		  "rx done: status %d", urb->status);
3663 
3664 	skb_put(skb, urb->actual_length);
3665 	state = rx_done;
3666 
3667 	if (urb != entry->urb)
3668 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch\n");
3669 
3670 	switch (urb_status) {
3671 	case 0:
3672 		if (skb->len < RX_SKB_MIN_LEN) {
3673 			state = rx_cleanup;
3674 			dev->net->stats.rx_errors++;
3675 			dev->net->stats.rx_length_errors++;
3676 			netif_dbg(dev, rx_err, dev->net,
3677 				  "rx length %d\n", skb->len);
3678 		}
3679 		usb_mark_last_busy(dev->udev);
3680 		break;
3681 	case -EPIPE:
3682 		dev->net->stats.rx_errors++;
3683 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3684 		fallthrough;
3685 	case -ECONNRESET:				/* async unlink */
3686 	case -ESHUTDOWN:				/* hardware gone */
3687 		netif_dbg(dev, ifdown, dev->net,
3688 			  "rx shutdown, code %d\n", urb_status);
3689 		state = rx_cleanup;
3690 		break;
3691 	case -EPROTO:
3692 	case -ETIME:
3693 	case -EILSEQ:
3694 		dev->net->stats.rx_errors++;
3695 		state = rx_cleanup;
3696 		break;
3697 
3698 	/* data overrun ... flush fifo? */
3699 	case -EOVERFLOW:
3700 		dev->net->stats.rx_over_errors++;
3701 		fallthrough;
3702 
3703 	default:
3704 		state = rx_cleanup;
3705 		dev->net->stats.rx_errors++;
3706 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3707 		break;
3708 	}
3709 
3710 	state = defer_bh(dev, skb, &dev->rxq, state);
3711 }
3712 
3713 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3714 {
3715 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3716 	size_t size = dev->rx_urb_size;
3717 	struct urb *urb = entry->urb;
3718 	unsigned long lockflags;
3719 	int ret = 0;
3720 
3721 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3722 			  skb->data, size, rx_complete, skb);
3723 
3724 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3725 
3726 	if (netif_device_present(dev->net) &&
3727 	    netif_running(dev->net) &&
3728 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3729 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3730 		ret = usb_submit_urb(urb, flags);
3731 		switch (ret) {
3732 		case 0:
3733 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3734 			break;
3735 		case -EPIPE:
3736 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3737 			break;
3738 		case -ENODEV:
3739 		case -ENOENT:
3740 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3741 			netif_device_detach(dev->net);
3742 			break;
3743 		case -EHOSTUNREACH:
3744 			ret = -ENOLINK;
3745 			napi_schedule(&dev->napi);
3746 			break;
3747 		default:
3748 			netif_dbg(dev, rx_err, dev->net,
3749 				  "rx submit, %d\n", ret);
3750 			napi_schedule(&dev->napi);
3751 			break;
3752 		}
3753 	} else {
3754 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3755 		ret = -ENOLINK;
3756 	}
3757 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3758 
3759 	if (ret)
3760 		lan78xx_release_rx_buf(dev, skb);
3761 
3762 	return ret;
3763 }
3764 
3765 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3766 {
3767 	struct sk_buff *rx_buf;
3768 
3769 	/* Ensure the maximum number of Rx URBs is submitted
3770 	 */
3771 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3772 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3773 			break;
3774 	}
3775 }
3776 
3777 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3778 				    struct sk_buff *rx_buf)
3779 {
3780 	/* reset SKB data pointers */
3781 
3782 	rx_buf->data = rx_buf->head;
3783 	skb_reset_tail_pointer(rx_buf);
3784 	rx_buf->len = 0;
3785 	rx_buf->data_len = 0;
3786 
3787 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3788 }
3789 
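/* Build the two little-endian 32-bit Tx command words that precede each
 * frame in the URB buffer. Descriptive sketch only (the exact field
 * positions live in the named masks/shifts, not spelled out here):
 *
 *   tx_cmd_a: frame length, FCS insertion, checksum offload flags for
 *             CHECKSUM_PARTIAL skbs, LSO flag, VLAN-tag-insert flag
 *   tx_cmd_b: TSO MSS (at least TX_CMD_B_MSS_MIN_), VLAN tag
 */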
3790 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3791 {
3792 	u32 tx_cmd_a;
3793 	u32 tx_cmd_b;
3794 
3795 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3796 
3797 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3798 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3799 
3800 	tx_cmd_b = 0;
3801 	if (skb_is_gso(skb)) {
3802 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3803 
3804 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3805 
3806 		tx_cmd_a |= TX_CMD_A_LSO_;
3807 	}
3808 
3809 	if (skb_vlan_tag_present(skb)) {
3810 		tx_cmd_a |= TX_CMD_A_IVTG_;
3811 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3812 	}
3813 
3814 	put_unaligned_le32(tx_cmd_a, buffer);
3815 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3816 }
3817 
3818 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3819 					    struct sk_buff *tx_buf)
3820 {
3821 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3822 	int remain = dev->tx_urb_size;
3823 	u8 *tx_data = tx_buf->data;
3824 	u32 urb_len = 0;
3825 
3826 	entry->num_of_packet = 0;
3827 	entry->length = 0;
3828 
3829 	/* Work through the pending SKBs and copy the data of each SKB into
3830 	 * the URB buffer if there is room for all the SKB data.
3831 	 *
3832 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
3833 	 */
3834 	while (remain >= TX_SKB_MIN_LEN) {
3835 		unsigned int pending_bytes;
3836 		unsigned int align_bytes;
3837 		struct sk_buff *skb;
3838 		unsigned int len;
3839 
3840 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3841 
3842 		if (!skb)
3843 			break;
3844 
3845 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3846 			      TX_ALIGNMENT;
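		/* Worked example (TX_ALIGNMENT is 4): urb_len = 1514 gives
		 * 1514 % 4 = 2, so align_bytes = (4 - 2) % 4 = 2; a urb_len
		 * that is already a multiple of 4 gives align_bytes = 0.
		 */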
3847 		len = align_bytes + TX_CMD_LEN + skb->len;
3848 		if (len > remain) {
3849 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3850 			break;
3851 		}
3852 
3853 		tx_data += align_bytes;
3854 
3855 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3856 		tx_data += TX_CMD_LEN;
3857 
3858 		len = skb->len;
3859 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3860 			struct net_device_stats *stats = &dev->net->stats;
3861 
3862 			stats->tx_dropped++;
3863 			dev_kfree_skb_any(skb);
3864 			tx_data -= TX_CMD_LEN;
3865 			continue;
3866 		}
3867 
3868 		tx_data += len;
3869 		entry->length += len;
3870 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3871 
3872 		dev_kfree_skb_any(skb);
3873 
3874 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3875 
3876 		remain = dev->tx_urb_size - urb_len;
3877 	}
3878 
3879 	skb_put(tx_buf, urb_len);
3880 
3881 	return entry;
3882 }
3883 
3884 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3885 {
3886 	int ret;
3887 
3888 	/* Start the stack Tx queue if it was stopped
3889 	 */
3890 	netif_tx_lock(dev->net);
3891 	if (netif_queue_stopped(dev->net)) {
3892 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3893 			netif_wake_queue(dev->net);
3894 	}
3895 	netif_tx_unlock(dev->net);
3896 
3897 	/* Go through the Tx pending queue and set up URBs to transfer
3898 	 * the data to the device. Stop if no more pending data or URBs,
3899 	 * or if an error occurs when a URB is submitted.
3900 	 */
3901 	do {
3902 		struct skb_data *entry;
3903 		struct sk_buff *tx_buf;
3904 		unsigned long flags;
3905 
3906 		if (skb_queue_empty(&dev->txq_pend))
3907 			break;
3908 
3909 		tx_buf = lan78xx_get_tx_buf(dev);
3910 		if (!tx_buf)
3911 			break;
3912 
3913 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3914 
3915 		spin_lock_irqsave(&dev->txq.lock, flags);
3916 		ret = usb_autopm_get_interface_async(dev->intf);
3917 		if (ret < 0) {
3918 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3919 			goto out;
3920 		}
3921 
3922 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
3923 				  tx_buf->data, tx_buf->len, tx_complete,
3924 				  tx_buf);
3925 
3926 		if (tx_buf->len % dev->maxpacket == 0) {
3927 			/* send USB_ZERO_PACKET */
3928 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
3929 		}
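		/* Example for the URB_ZERO_PACKET case above: on a 512-byte
		 * high-speed bulk endpoint a 1024-byte URB is exactly two
		 * max-size packets, so a trailing zero-length packet is
		 * needed for the device to see the end of the transfer.
		 */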
3930 
3931 #ifdef CONFIG_PM
3932 		/* if device is asleep stop outgoing packet processing */
3933 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3934 			usb_anchor_urb(entry->urb, &dev->deferred);
3935 			netif_stop_queue(dev->net);
3936 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3937 			netdev_dbg(dev->net,
3938 				   "Delaying transmission for resumption\n");
3939 			return;
3940 		}
3941 #endif
3942 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
3943 		switch (ret) {
3944 		case 0:
3945 			netif_trans_update(dev->net);
3946 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
3947 			break;
3948 		case -EPIPE:
3949 			netif_stop_queue(dev->net);
3950 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3951 			usb_autopm_put_interface_async(dev->intf);
3952 			break;
3953 		case -ENODEV:
3954 		case -ENOENT:
3955 			netif_dbg(dev, tx_err, dev->net,
3956 				  "tx submit urb err %d (disconnected?)\n", ret);
3957 			netif_device_detach(dev->net);
3958 			break;
3959 		default:
3960 			usb_autopm_put_interface_async(dev->intf);
3961 			netif_dbg(dev, tx_err, dev->net,
3962 				  "tx submit urb err %d\n", ret);
3963 			break;
3964 		}
3965 
3966 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3967 
3968 		if (ret) {
3969 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
3970 out:
3971 			dev->net->stats.tx_dropped += entry->num_of_packet;
3972 			lan78xx_release_tx_buf(dev, tx_buf);
3973 		}
3974 	} while (ret == 0);
3975 }
3976 
3977 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
3978 {
3979 	struct sk_buff_head done;
3980 	struct sk_buff *rx_buf;
3981 	struct skb_data *entry;
3982 	unsigned long flags;
3983 	int work_done = 0;
3984 
3985 	/* Pass frames received in the last NAPI cycle before
3986 	 * working on newly completed URBs.
3987 	 */
3988 	while (!skb_queue_empty(&dev->rxq_overflow)) {
3989 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
3990 		++work_done;
3991 	}
3992 
3993 	/* Take a snapshot of the done queue and move items to a
3994 	 * temporary queue. Rx URB completions will continue to add
3995 	 * to the done queue.
3996 	 */
3997 	__skb_queue_head_init(&done);
3998 
3999 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4000 	skb_queue_splice_init(&dev->rxq_done, &done);
4001 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4002 
4003 	/* Extract receive frames from completed URBs and
4004 	 * pass them to the stack. Re-submit each completed URB.
4005 	 */
4006 	while ((work_done < budget) &&
4007 	       (rx_buf = __skb_dequeue(&done))) {
4008 		entry = (struct skb_data *)(rx_buf->cb);
4009 		switch (entry->state) {
4010 		case rx_done:
4011 			rx_process(dev, rx_buf, budget, &work_done);
4012 			break;
4013 		case rx_cleanup:
4014 			break;
4015 		default:
4016 			netdev_dbg(dev->net, "rx buf state %d\n",
4017 				   entry->state);
4018 			break;
4019 		}
4020 
4021 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4022 	}
4023 
4024 	/* If budget was consumed before processing all the URBs, put them
4025 	 * back on the front of the done queue. They will be first to be
4026 	 * processed in the next NAPI cycle.
4027 	 */
4028 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4029 	skb_queue_splice(&done, &dev->rxq_done);
4030 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4031 
4032 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4033 		/* reset update timer delta */
4034 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4035 			dev->delta = 1;
4036 			mod_timer(&dev->stat_monitor,
4037 				  jiffies + STAT_UPDATE_TIMER);
4038 		}
4039 
4040 		/* Submit all free Rx URBs */
4041 
4042 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4043 			lan78xx_rx_urb_submit_all(dev);
4044 
4045 		/* Submit new Tx URBs */
4046 
4047 		lan78xx_tx_bh(dev);
4048 	}
4049 
4050 	return work_done;
4051 }
4052 
4053 static int lan78xx_poll(struct napi_struct *napi, int budget)
4054 {
4055 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4056 	int result = budget;
4057 	int work_done;
4058 
4059 	/* Don't do any work if the device is suspended */
4060 
4061 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4062 		napi_complete_done(napi, 0);
4063 		return 0;
4064 	}
4065 
4066 	/* Process completed URBs and submit new URBs */
4067 
4068 	work_done = lan78xx_bh(dev, budget);
4069 
4070 	if (work_done < budget) {
4071 		napi_complete_done(napi, work_done);
4072 
4073 		/* Start a new polling cycle if data was received or
4074 		 * data is waiting to be transmitted.
4075 		 */
4076 		if (!skb_queue_empty(&dev->rxq_done)) {
4077 			napi_schedule(napi);
4078 		} else if (netif_carrier_ok(dev->net)) {
4079 			if (skb_queue_empty(&dev->txq) &&
4080 			    !skb_queue_empty(&dev->txq_pend)) {
4081 				napi_schedule(napi);
4082 			} else {
4083 				netif_tx_lock(dev->net);
4084 				if (netif_queue_stopped(dev->net)) {
4085 					netif_wake_queue(dev->net);
4086 					napi_schedule(napi);
4087 				}
4088 				netif_tx_unlock(dev->net);
4089 			}
4090 		}
4091 		result = work_done;
4092 	}
4093 
4094 	return result;
4095 }
4096 
4097 static void lan78xx_delayedwork(struct work_struct *work)
4098 {
4099 	int status;
4100 	struct lan78xx_net *dev;
4101 
4102 	dev = container_of(work, struct lan78xx_net, wq.work);
4103 
4104 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4105 		return;
4106 
4107 	if (usb_autopm_get_interface(dev->intf) < 0)
4108 		return;
4109 
4110 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4111 		unlink_urbs(dev, &dev->txq);
4112 
4113 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4114 		if (status < 0 &&
4115 		    status != -EPIPE &&
4116 		    status != -ESHUTDOWN) {
4117 			if (netif_msg_tx_err(dev))
4118 				netdev_err(dev->net,
4119 					   "can't clear tx halt, status %d\n",
4120 					   status);
4121 		} else {
4122 			clear_bit(EVENT_TX_HALT, &dev->flags);
4123 			if (status != -ESHUTDOWN)
4124 				netif_wake_queue(dev->net);
4125 		}
4126 	}
4127 
4128 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4129 		unlink_urbs(dev, &dev->rxq);
4130 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4131 		if (status < 0 &&
4132 		    status != -EPIPE &&
4133 		    status != -ESHUTDOWN) {
4134 			if (netif_msg_rx_err(dev))
4135 				netdev_err(dev->net,
4136 					   "can't clear rx halt, status %d\n",
4137 					   status);
4138 		} else {
4139 			clear_bit(EVENT_RX_HALT, &dev->flags);
4140 			napi_schedule(&dev->napi);
4141 		}
4142 	}
4143 
4144 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4145 		int ret;
4146 
4147 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4148 		ret = lan78xx_link_reset(dev);
4149 		if (ret < 0)
4150 			netdev_info(dev->net, "link reset failed (%d)\n",
4151 				    ret);
4152 	}
4153 
4154 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4155 		lan78xx_update_stats(dev);
4156 
4157 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4158 
4159 		mod_timer(&dev->stat_monitor,
4160 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4161 
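		/* Stats polling backs off geometrically: delta doubles on
		 * each update (1, 2, 4, ..., capped at 50), stretching the
		 * timer period up to 50 * STAT_UPDATE_TIMER until Rx/Tx
		 * activity in lan78xx_bh() resets delta to 1.
		 */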
4162 		dev->delta = min((dev->delta * 2), 50);
4163 	}
4164 
4165 	usb_autopm_put_interface(dev->intf);
4166 }
4167 
4168 static void intr_complete(struct urb *urb)
4169 {
4170 	struct lan78xx_net *dev = urb->context;
4171 	int status = urb->status;
4172 
4173 	switch (status) {
4174 	/* success */
4175 	case 0:
4176 		lan78xx_status(dev, urb);
4177 		break;
4178 
4179 	/* software-driven interface shutdown */
4180 	case -ENOENT:			/* urb killed */
4181 	case -ENODEV:			/* hardware gone */
4182 	case -ESHUTDOWN:		/* hardware gone */
4183 		netif_dbg(dev, ifdown, dev->net,
4184 			  "intr shutdown, code %d\n", status);
4185 		return;
4186 
4187 	/* NOTE:  not throttling like RX/TX, since this endpoint
4188 	 * already polls infrequently
4189 	 */
4190 	default:
4191 		netdev_dbg(dev->net, "intr status %d\n", status);
4192 		break;
4193 	}
4194 
4195 	if (!netif_device_present(dev->net) ||
4196 	    !netif_running(dev->net)) {
4197 		netdev_warn(dev->net, "not submitting new status URB\n");
4198 		return;
4199 	}
4200 
4201 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4202 	status = usb_submit_urb(urb, GFP_ATOMIC);
4203 
4204 	switch (status) {
4205 	case  0:
4206 		break;
4207 	case -ENODEV:
4208 	case -ENOENT:
4209 		netif_dbg(dev, timer, dev->net,
4210 			  "intr resubmit %d (disconnect?)\n", status);
4211 		netif_device_detach(dev->net);
4212 		break;
4213 	default:
4214 		netif_err(dev, timer, dev->net,
4215 			  "intr resubmit --> %d\n", status);
4216 		break;
4217 	}
4218 }
4219 
4220 static void lan78xx_disconnect(struct usb_interface *intf)
4221 {
4222 	struct lan78xx_net *dev;
4223 	struct usb_device *udev;
4224 	struct net_device *net;
4225 	struct phy_device *phydev;
4226 
4227 	dev = usb_get_intfdata(intf);
4228 	usb_set_intfdata(intf, NULL);
4229 	if (!dev)
4230 		return;
4231 
4232 	netif_napi_del(&dev->napi);
4233 
4234 	udev = interface_to_usbdev(intf);
4235 	net = dev->net;
4236 
4237 	unregister_netdev(net);
4238 
4239 	timer_shutdown_sync(&dev->stat_monitor);
4240 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4241 	cancel_delayed_work_sync(&dev->wq);
4242 
4243 	phydev = net->phydev;
4244 
4245 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4246 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4247 
4248 	phy_disconnect(net->phydev);
4249 
4250 	if (phy_is_pseudo_fixed_link(phydev)) {
4251 		fixed_phy_unregister(phydev);
4252 		phy_device_free(phydev);
4253 	}
4254 
4255 	usb_scuttle_anchored_urbs(&dev->deferred);
4256 
4257 	lan78xx_unbind(dev, intf);
4258 
4259 	lan78xx_free_tx_resources(dev);
4260 	lan78xx_free_rx_resources(dev);
4261 
4262 	usb_kill_urb(dev->urb_intr);
4263 	usb_free_urb(dev->urb_intr);
4264 
4265 	free_netdev(net);
4266 	usb_put_dev(udev);
4267 }
4268 
4269 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4270 {
4271 	struct lan78xx_net *dev = netdev_priv(net);
4272 
4273 	unlink_urbs(dev, &dev->txq);
4274 	napi_schedule(&dev->napi);
4275 }
4276 
4277 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4278 						struct net_device *netdev,
4279 						netdev_features_t features)
4280 {
4281 	struct lan78xx_net *dev = netdev_priv(netdev);
4282 
4283 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4284 		features &= ~NETIF_F_GSO_MASK;
4285 
4286 	features = vlan_features_check(skb, features);
4287 	features = vxlan_features_check(skb, features);
4288 
4289 	return features;
4290 }
4291 
4292 static const struct net_device_ops lan78xx_netdev_ops = {
4293 	.ndo_open		= lan78xx_open,
4294 	.ndo_stop		= lan78xx_stop,
4295 	.ndo_start_xmit		= lan78xx_start_xmit,
4296 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4297 	.ndo_change_mtu		= lan78xx_change_mtu,
4298 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4299 	.ndo_validate_addr	= eth_validate_addr,
4300 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4301 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4302 	.ndo_set_features	= lan78xx_set_features,
4303 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4304 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4305 	.ndo_features_check	= lan78xx_features_check,
4306 };
4307 
4308 static void lan78xx_stat_monitor(struct timer_list *t)
4309 {
4310 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4311 
4312 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4313 }
4314 
4315 static int lan78xx_probe(struct usb_interface *intf,
4316 			 const struct usb_device_id *id)
4317 {
4318 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4319 	struct lan78xx_net *dev;
4320 	struct net_device *netdev;
4321 	struct usb_device *udev;
4322 	int ret;
4323 	unsigned int maxp;
4324 	unsigned int period;
4325 	u8 *buf = NULL;
4326 
4327 	udev = interface_to_usbdev(intf);
4328 	udev = usb_get_dev(udev);
4329 
4330 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4331 	if (!netdev) {
4332 		dev_err(&intf->dev, "Error: OOM\n");
4333 		ret = -ENOMEM;
4334 		goto out1;
4335 	}
4336 
4337 	/* netdev_printk() needs this */
4338 	SET_NETDEV_DEV(netdev, &intf->dev);
4339 
4340 	dev = netdev_priv(netdev);
4341 	dev->udev = udev;
4342 	dev->intf = intf;
4343 	dev->net = netdev;
4344 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4345 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4346 
4347 	skb_queue_head_init(&dev->rxq);
4348 	skb_queue_head_init(&dev->txq);
4349 	skb_queue_head_init(&dev->rxq_done);
4350 	skb_queue_head_init(&dev->txq_pend);
4351 	skb_queue_head_init(&dev->rxq_overflow);
4352 	mutex_init(&dev->phy_mutex);
4353 	mutex_init(&dev->dev_mutex);
4354 
4355 	ret = lan78xx_urb_config_init(dev);
4356 	if (ret < 0)
4357 		goto out2;
4358 
4359 	ret = lan78xx_alloc_tx_resources(dev);
4360 	if (ret < 0)
4361 		goto out2;
4362 
4363 	ret = lan78xx_alloc_rx_resources(dev);
4364 	if (ret < 0)
4365 		goto out3;
4366 
4367 	/* MTU range: 68 - 9000 */
4368 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4369 
4370 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4371 
4372 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4373 
4374 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4375 	init_usb_anchor(&dev->deferred);
4376 
4377 	netdev->netdev_ops = &lan78xx_netdev_ops;
4378 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4379 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4380 
4381 	dev->delta = 1;
4382 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4383 
4384 	mutex_init(&dev->stats.access_lock);
4385 
4386 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4387 		ret = -ENODEV;
4388 		goto out4;
4389 	}
4390 
4391 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4392 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4393 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4394 		ret = -ENODEV;
4395 		goto out4;
4396 	}
4397 
4398 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4399 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4400 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4401 		ret = -ENODEV;
4402 		goto out4;
4403 	}
4404 
4405 	ep_intr = &intf->cur_altsetting->endpoint[2];
4406 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4407 		ret = -ENODEV;
4408 		goto out4;
4409 	}
4410 
4411 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4412 					usb_endpoint_num(&ep_intr->desc));
4413 
4414 	ret = lan78xx_bind(dev, intf);
4415 	if (ret < 0)
4416 		goto out4;
4417 
4418 	period = ep_intr->desc.bInterval;
4419 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4420 
4421 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4422 	if (!dev->urb_intr) {
4423 		ret = -ENOMEM;
4424 		goto out5;
4425 	}
4426 
4427 	buf = kmalloc(maxp, GFP_KERNEL);
4428 	if (!buf) {
4429 		ret = -ENOMEM;
4430 		goto free_urbs;
4431 	}
4432 
4433 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4434 			 dev->pipe_intr, buf, maxp,
4435 			 intr_complete, dev, period);
4436 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4437 
4438 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4439 
4440 	/* Reject broken descriptors. */
4441 	if (dev->maxpacket == 0) {
4442 		ret = -ENODEV;
4443 		goto free_urbs;
4444 	}
4445 
4446 	/* driver requires remote-wakeup capability during autosuspend. */
4447 	intf->needs_remote_wakeup = 1;
4448 
4449 	ret = lan78xx_phy_init(dev);
4450 	if (ret < 0)
4451 		goto free_urbs;
4452 
4453 	ret = register_netdev(netdev);
4454 	if (ret != 0) {
4455 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4456 		goto out8;
4457 	}
4458 
4459 	usb_set_intfdata(intf, dev);
4460 
4461 	ret = device_set_wakeup_enable(&udev->dev, true);
4462 
4463 	/* The default autosuspend delay of 2 sec costs more in wakeup
4464 	 * overhead than it saves; use 10 sec instead.
4465 	 */
4466 	pm_runtime_set_autosuspend_delay(&udev->dev,
4467 					 DEFAULT_AUTOSUSPEND_DELAY);
4468 
4469 	return 0;
4470 
4471 out8:
4472 	phy_disconnect(netdev->phydev);
4473 free_urbs:
4474 	usb_free_urb(dev->urb_intr);
4475 out5:
4476 	lan78xx_unbind(dev, intf);
4477 out4:
4478 	netif_napi_del(&dev->napi);
4479 	lan78xx_free_rx_resources(dev);
4480 out3:
4481 	lan78xx_free_tx_resources(dev);
4482 out2:
4483 	free_netdev(netdev);
4484 out1:
4485 	usb_put_dev(udev);
4486 
4487 	return ret;
4488 }
4489 
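/* Bit-serial CRC-16 used for the wakeup-frame filters: polynomial 0x8005,
 * initial value 0xFFFF, data consumed LSB first. One bit step of the loop
 * below, written out (a sketch, equivalent to the code):
 *
 *   msb = crc >> 15; crc <<= 1;
 *   if (msb ^ in_bit)
 *           crc = (crc ^ 0x8005) | 1;
 *
 * The result is programmed into WUF_CFGx, so it must match the CRC the
 * hardware computes over the mask-selected frame bytes
 * (see lan78xx_set_suspend()).
 */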
4490 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4491 {
4492 	const u16 crc16poly = 0x8005;
4493 	int i;
4494 	u16 bit, crc, msb;
4495 	u8 data;
4496 
4497 	crc = 0xFFFF;
4498 	for (i = 0; i < len; i++) {
4499 		data = *buf++;
4500 		for (bit = 0; bit < 8; bit++) {
4501 			msb = crc >> 15;
4502 			crc <<= 1;
4503 
4504 			if (msb ^ (u16)(data & 1)) {
4505 				crc ^= crc16poly;
4506 				crc |= (u16)0x0001U;
4507 			}
4508 			data >>= 1;
4509 		}
4510 	}
4511 
4512 	return crc;
4513 }
4514 
4515 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4516 {
4517 	u32 buf;
4518 	int ret;
4519 
4520 	ret = lan78xx_stop_tx_path(dev);
4521 	if (ret < 0)
4522 		return ret;
4523 
4524 	ret = lan78xx_stop_rx_path(dev);
4525 	if (ret < 0)
4526 		return ret;
4527 
4528 	/* auto suspend (selective suspend) */
4529 
4530 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4531 	if (ret < 0)
4532 		return ret;
4533 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4534 	if (ret < 0)
4535 		return ret;
4536 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4537 	if (ret < 0)
4538 		return ret;
4539 
4540 	/* set goodframe wakeup */
4541 
4542 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4543 	if (ret < 0)
4544 		return ret;
4545 
4546 	buf |= WUCSR_RFE_WAKE_EN_;
4547 	buf |= WUCSR_STORE_WAKE_;
4548 
4549 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4550 	if (ret < 0)
4551 		return ret;
4552 
4553 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4554 	if (ret < 0)
4555 		return ret;
4556 
4557 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4558 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4559 	buf |= PMT_CTL_PHY_WAKE_EN_;
4560 	buf |= PMT_CTL_WOL_EN_;
4561 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4562 	buf |= PMT_CTL_SUS_MODE_3_;
4563 
4564 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4565 	if (ret < 0)
4566 		return ret;
4567 
4568 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4569 	if (ret < 0)
4570 		return ret;
4571 
4572 	buf |= PMT_CTL_WUPS_MASK_;
4573 
4574 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4575 	if (ret < 0)
4576 		return ret;
4577 
4578 	ret = lan78xx_start_rx_path(dev);
4579 
4580 	return ret;
4581 }
4582 
4583 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4584 {
4585 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4586 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4587 	const u8 arp_type[2] = { 0x08, 0x06 };
4588 	u32 temp_pmt_ctl;
4589 	int mask_index;
4590 	u32 temp_wucsr;
4591 	u32 buf;
4592 	u16 crc;
4593 	int ret;
4594 
4595 	ret = lan78xx_stop_tx_path(dev);
4596 	if (ret < 0)
4597 		return ret;
4598 	ret = lan78xx_stop_rx_path(dev);
4599 	if (ret < 0)
4600 		return ret;
4601 
4602 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4603 	if (ret < 0)
4604 		return ret;
4605 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4606 	if (ret < 0)
4607 		return ret;
4608 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4609 	if (ret < 0)
4610 		return ret;
4611 
4612 	temp_wucsr = 0;
4613 
4614 	temp_pmt_ctl = 0;
4615 
4616 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4617 	if (ret < 0)
4618 		return ret;
4619 
4620 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4621 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4622 
4623 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4624 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4625 		if (ret < 0)
4626 			return ret;
4627 	}
4628 
4629 	mask_index = 0;
4630 	if (wol & WAKE_PHY) {
4631 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4632 
4633 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4634 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4635 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4636 	}
4637 	if (wol & WAKE_MAGIC) {
4638 		temp_wucsr |= WUCSR_MPEN_;
4639 
4640 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4641 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4642 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4643 	}
4644 	if (wol & WAKE_BCAST) {
4645 		temp_wucsr |= WUCSR_BCST_EN_;
4646 
4647 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4648 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4649 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4650 	}
4651 	if (wol & WAKE_MCAST) {
4652 		temp_wucsr |= WUCSR_WAKE_EN_;
4653 
4654 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
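		/* The WUF_MASKn bitmap appears to select which frame bytes
		 * feed the CRC16: mask 7 (0b111) below covers bytes 0-2,
		 * i.e. the 01:00:5E IPv4 multicast OUI; the IPv6 case uses
		 * mask 3 for the 33:33 prefix and the ARP case 0x3000 for
		 * the EtherType at offsets 12-13.
		 */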
4655 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4656 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4657 					WUF_CFGX_EN_ |
4658 					WUF_CFGX_TYPE_MCAST_ |
4659 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4660 					(crc & WUF_CFGX_CRC16_MASK_));
4661 		if (ret < 0)
4662 			return ret;
4663 
4664 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4665 		if (ret < 0)
4666 			return ret;
4667 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4668 		if (ret < 0)
4669 			return ret;
4670 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4671 		if (ret < 0)
4672 			return ret;
4673 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4674 		if (ret < 0)
4675 			return ret;
4676 
4677 		mask_index++;
4678 
4679 		/* for IPv6 Multicast */
4680 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4681 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4682 					WUF_CFGX_EN_ |
4683 					WUF_CFGX_TYPE_MCAST_ |
4684 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4685 					(crc & WUF_CFGX_CRC16_MASK_));
4686 		if (ret < 0)
4687 			return ret;
4688 
4689 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4690 		if (ret < 0)
4691 			return ret;
4692 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4693 		if (ret < 0)
4694 			return ret;
4695 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4696 		if (ret < 0)
4697 			return ret;
4698 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4699 		if (ret < 0)
4700 			return ret;
4701 
4702 		mask_index++;
4703 
4704 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4705 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4706 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4707 	}
4708 	if (wol & WAKE_UCAST) {
4709 		temp_wucsr |= WUCSR_PFDA_EN_;
4710 
4711 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4712 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4713 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4714 	}
4715 	if (wol & WAKE_ARP) {
4716 		temp_wucsr |= WUCSR_WAKE_EN_;
4717 
4718 		/* set WUF_CFG & WUF_MASK
4719 		 * for packet type (offsets 12, 13) = ARP (0x0806)
4720 		 */
4721 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4722 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4723 					WUF_CFGX_EN_ |
4724 					WUF_CFGX_TYPE_ALL_ |
4725 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4726 					(crc & WUF_CFGX_CRC16_MASK_));
4727 		if (ret < 0)
4728 			return ret;
4729 
4730 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4731 		if (ret < 0)
4732 			return ret;
4733 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4734 		if (ret < 0)
4735 			return ret;
4736 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4737 		if (ret < 0)
4738 			return ret;
4739 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4740 		if (ret < 0)
4741 			return ret;
4742 
4743 		mask_index++;
4744 
4745 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4746 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4747 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4748 	}
4749 
4750 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4751 	if (ret < 0)
4752 		return ret;
4753 
4754 	/* when multiple WOL bits are set */
4755 	if (hweight_long((unsigned long)wol) > 1) {
4756 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4757 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4758 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4759 	}
4760 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4761 	if (ret < 0)
4762 		return ret;
4763 
4764 	/* clear WUPS */
4765 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4766 	if (ret < 0)
4767 		return ret;
4768 
4769 	buf |= PMT_CTL_WUPS_MASK_;
4770 
4771 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4772 	if (ret < 0)
4773 		return ret;
4774 
4775 	ret = lan78xx_start_rx_path(dev);
4776 
4777 	return ret;
4778 }
4779 
4780 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4781 {
4782 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4783 	bool dev_open;
4784 	int ret;
4785 
4786 	mutex_lock(&dev->dev_mutex);
4787 
4788 	netif_dbg(dev, ifdown, dev->net,
4789 		  "suspending: pm event %#x", message.event);
4790 
4791 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4792 
4793 	if (dev_open) {
4794 		spin_lock_irq(&dev->txq.lock);
4795 		/* don't autosuspend while transmitting */
4796 		if ((skb_queue_len(&dev->txq) ||
4797 		     skb_queue_len(&dev->txq_pend)) &&
4798 		    PMSG_IS_AUTO(message)) {
4799 			spin_unlock_irq(&dev->txq.lock);
4800 			ret = -EBUSY;
4801 			goto out;
4802 		} else {
4803 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4804 			spin_unlock_irq(&dev->txq.lock);
4805 		}
4806 
4807 		/* stop RX */
4808 		ret = lan78xx_stop_rx_path(dev);
4809 		if (ret < 0)
4810 			goto out;
4811 
4812 		ret = lan78xx_flush_rx_fifo(dev);
4813 		if (ret < 0)
4814 			goto out;
4815 
4816 		/* stop Tx */
4817 		ret = lan78xx_stop_tx_path(dev);
4818 		if (ret < 0)
4819 			goto out;
4820 
4821 		/* empty out the Rx and Tx queues */
4822 		netif_device_detach(dev->net);
4823 		lan78xx_terminate_urbs(dev);
4824 		usb_kill_urb(dev->urb_intr);
4825 
4826 		/* reattach */
4827 		netif_device_attach(dev->net);
4828 
4829 		del_timer(&dev->stat_monitor);
4830 
4831 		if (PMSG_IS_AUTO(message)) {
4832 			ret = lan78xx_set_auto_suspend(dev);
4833 			if (ret < 0)
4834 				goto out;
4835 		} else {
4836 			struct lan78xx_priv *pdata;
4837 
4838 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4839 			netif_carrier_off(dev->net);
4840 			ret = lan78xx_set_suspend(dev, pdata->wol);
4841 			if (ret < 0)
4842 				goto out;
4843 		}
4844 	} else {
4845 		/* Interface is down; don't allow WOL and PHY
4846 		 * events to wake up the host
4847 		 */
4848 		u32 buf;
4849 
4850 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4851 
4852 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4853 		if (ret < 0)
4854 			goto out;
4855 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4856 		if (ret < 0)
4857 			goto out;
4858 
4859 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4860 		if (ret < 0)
4861 			goto out;
4862 
4863 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4864 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4865 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4866 		buf |= PMT_CTL_SUS_MODE_3_;
4867 
4868 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4869 		if (ret < 0)
4870 			goto out;
4871 
4872 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4873 		if (ret < 0)
4874 			goto out;
4875 
4876 		buf |= PMT_CTL_WUPS_MASK_;
4877 
4878 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4879 		if (ret < 0)
4880 			goto out;
4881 	}
4882 
4883 	ret = 0;
4884 out:
4885 	mutex_unlock(&dev->dev_mutex);
4886 
4887 	return ret;
4888 }
4889 
4890 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4891 {
4892 	bool pipe_halted = false;
4893 	struct urb *urb;
4894 
4895 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4896 		struct sk_buff *skb = urb->context;
4897 		int ret;
4898 
4899 		if (!netif_device_present(dev->net) ||
4900 		    !netif_carrier_ok(dev->net) ||
4901 		    pipe_halted) {
4902 			lan78xx_release_tx_buf(dev, skb);
4903 			continue;
4904 		}
4905 
4906 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4907 
4908 		if (ret == 0) {
4909 			netif_trans_update(dev->net);
4910 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4911 		} else {
4912 			if (ret == -EPIPE) {
4913 				netif_stop_queue(dev->net);
4914 				pipe_halted = true;
4915 			} else if (ret == -ENODEV) {
4916 				netif_device_detach(dev->net);
4917 			}
4918 
4919 			lan78xx_release_tx_buf(dev, skb);
4920 		}
4921 	}
4922 
4923 	return pipe_halted;
4924 }
4925 
4926 static int lan78xx_resume(struct usb_interface *intf)
4927 {
4928 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4929 	bool dev_open;
4930 	int ret;
4931 
4932 	mutex_lock(&dev->dev_mutex);
4933 
4934 	netif_dbg(dev, ifup, dev->net, "resuming device");
4935 
4936 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4937 
4938 	if (dev_open) {
4939 		bool pipe_halted = false;
4940 
4941 		ret = lan78xx_flush_tx_fifo(dev);
4942 		if (ret < 0)
4943 			goto out;
4944 
4945 		if (dev->urb_intr) {
4946 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4947 
4948 			if (ret < 0) {
4949 				if (ret == -ENODEV)
4950 					netif_device_detach(dev->net);
4951 				netdev_warn(dev->net, "Failed to submit intr URB");
4952 				netdev_warn(dev->net, "Failed to submit intr URB\n");
4953 		}
4954 
4955 		spin_lock_irq(&dev->txq.lock);
4956 
4957 		if (netif_device_present(dev->net)) {
4958 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4959 
4960 			if (pipe_halted)
4961 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4962 		}
4963 
4964 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4965 
4966 		spin_unlock_irq(&dev->txq.lock);
4967 
4968 		if (!pipe_halted &&
4969 		    netif_device_present(dev->net) &&
4970 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4971 			netif_start_queue(dev->net);
4972 
4973 		ret = lan78xx_start_tx_path(dev);
4974 		if (ret < 0)
4975 			goto out;
4976 
4977 		napi_schedule(&dev->napi);
4978 
4979 		if (!timer_pending(&dev->stat_monitor)) {
4980 			dev->delta = 1;
4981 			mod_timer(&dev->stat_monitor,
4982 				  jiffies + STAT_UPDATE_TIMER);
4983 		}
4984 
4985 	} else {
4986 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4987 	}
4988 
4989 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4990 	if (ret < 0)
4991 		goto out;
4992 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4993 	if (ret < 0)
4994 		goto out;
4995 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4996 	if (ret < 0)
4997 		goto out;
4998 
4999 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5000 					     WUCSR2_ARP_RCD_ |
5001 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5002 					     WUCSR2_IPV4_TCPSYN_RCD_);
5003 	if (ret < 0)
5004 		goto out;
5005 
5006 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5007 					    WUCSR_EEE_RX_WAKE_ |
5008 					    WUCSR_PFDA_FR_ |
5009 					    WUCSR_RFE_WAKE_FR_ |
5010 					    WUCSR_WUFR_ |
5011 					    WUCSR_MPR_ |
5012 					    WUCSR_BCST_FR_);
5013 	if (ret < 0)
5014 		goto out;
5015 
5016 	ret = 0;
5017 out:
5018 	mutex_unlock(&dev->dev_mutex);
5019 
5020 	return ret;
5021 }
5022 
5023 static int lan78xx_reset_resume(struct usb_interface *intf)
5024 {
5025 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5026 	int ret;
5027 
5028 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5029 
5030 	ret = lan78xx_reset(dev);
5031 	if (ret < 0)
5032 		return ret;
5033 
5034 	phy_start(dev->net->phydev);
5035 
5036 	ret = lan78xx_resume(intf);
5037 
5038 	return ret;
5039 }
5040 
5041 static const struct usb_device_id products[] = {
5042 	{
5043 	/* LAN7800 USB Gigabit Ethernet Device */
5044 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5045 	},
5046 	{
5047 	/* LAN7850 USB Gigabit Ethernet Device */
5048 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5049 	},
5050 	{
5051 	/* LAN7801 USB Gigabit Ethernet Device */
5052 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5053 	},
5054 	{
5055 	/* ATM2-AF USB Gigabit Ethernet Device */
5056 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5057 	},
5058 	{},
5059 };
5060 MODULE_DEVICE_TABLE(usb, products);
5061 
5062 static struct usb_driver lan78xx_driver = {
5063 	.name			= DRIVER_NAME,
5064 	.id_table		= products,
5065 	.probe			= lan78xx_probe,
5066 	.disconnect		= lan78xx_disconnect,
5067 	.suspend		= lan78xx_suspend,
5068 	.resume			= lan78xx_resume,
5069 	.reset_resume		= lan78xx_reset_resume,
5070 	.supports_autosuspend	= 1,
5071 	.disable_hub_initiated_lpm = 1,
5072 };
5073 
5074 module_usb_driver(lan78xx_driver);
5075 
5076 MODULE_AUTHOR(DRIVER_AUTHOR);
5077 MODULE_DESCRIPTION(DRIVER_DESC);
5078 MODULE_LICENSE("GPL");
5079