1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * This code is derived from the VIA reference driver (copyright message
4  * below) provided to Red Hat by VIA Networking Technologies, Inc. for
5  * addition to the Linux kernel.
6  *
7  * The code has been merged into one source file, cleaned up to follow
8  * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
9  * for 64bit hardware platforms.
10  *
11  * TODO
12  *	rx_copybreak/alignment
13  *	More testing
14  *
15  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
16  * Additional fixes and clean up: Francois Romieu
17  *
18  * This source has not been verified for use in safety critical systems.
19  *
20  * Please direct queries about the revamped driver to the linux-kernel
21  * list not VIA.
22  *
23  * Original code:
24  *
25  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
26  * All rights reserved.
27  *
28  * Author: Chuang Liang-Shing, AJ Jiang
29  *
30  * Date: Jan 24, 2003
31  *
32  * MODULE_LICENSE("GPL");
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/bitops.h>
40 #include <linux/init.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/mm.h>
43 #include <linux/errno.h>
44 #include <linux/ioport.h>
45 #include <linux/pci.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50 #include <linux/delay.h>
51 #include <linux/timer.h>
52 #include <linux/slab.h>
53 #include <linux/interrupt.h>
54 #include <linux/string.h>
55 #include <linux/wait.h>
56 #include <linux/io.h>
57 #include <linux/if.h>
58 #include <linux/uaccess.h>
59 #include <linux/proc_fs.h>
60 #include <linux/of_address.h>
61 #include <linux/of_device.h>
62 #include <linux/of_irq.h>
63 #include <linux/inetdevice.h>
64 #include <linux/platform_device.h>
65 #include <linux/reboot.h>
66 #include <linux/ethtool.h>
67 #include <linux/mii.h>
68 #include <linux/in.h>
69 #include <linux/if_arp.h>
70 #include <linux/if_vlan.h>
71 #include <linux/ip.h>
72 #include <linux/tcp.h>
73 #include <linux/udp.h>
74 #include <linux/crc-ccitt.h>
75 #include <linux/crc32.h>
76 
77 #include "via-velocity.h"
78 
79 enum velocity_bus_type {
80 	BUS_PCI,
81 	BUS_PLATFORM,
82 };
83 
84 static int velocity_nics;
85 
86 static void velocity_set_power_state(struct velocity_info *vptr, char state)
87 {
	void __iomem *addr = vptr->mac_regs;
89 
90 	if (vptr->pdev)
91 		pci_set_power_state(vptr->pdev, state);
92 	else
93 		writeb(state, addr + 0x154);
94 }
95 
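/*
 * The CAM (content addressable memory) helpers below all follow the same
 * pattern: use the CAMCR page-select bits to switch the MARCAM window to
 * the CAM mask or CAM data page, latch an index/enable value into CAMADDR,
 * read or write the MARCAM bytes, then clear CAMADDR and switch the window
 * back to the multicast address registers (MAR page).
 */
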
96 /**
97  *	mac_get_cam_mask	-	Read a CAM mask
98  *	@regs: register block for this velocity
99  *	@mask: buffer to store mask
100  *
101  *	Fetch the mask bits of the selected CAM and store them into the
102  *	provided mask buffer.
103  */
104 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
105 {
106 	int i;
107 
108 	/* Select CAM mask */
109 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
110 
111 	writeb(0, &regs->CAMADDR);
112 
113 	/* read mask */
114 	for (i = 0; i < 8; i++)
115 		*mask++ = readb(&(regs->MARCAM[i]));
116 
117 	/* disable CAMEN */
118 	writeb(0, &regs->CAMADDR);
119 
120 	/* Select mar */
121 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
122 }
123 
124 /**
125  *	mac_set_cam_mask	-	Set a CAM mask
126  *	@regs: register block for this velocity
127  *	@mask: CAM mask to load
128  *
129  *	Store a new mask into a CAM
130  */
131 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
132 {
133 	int i;
134 	/* Select CAM mask */
135 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
136 
137 	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
138 
139 	for (i = 0; i < 8; i++)
140 		writeb(*mask++, &(regs->MARCAM[i]));
141 
142 	/* disable CAMEN */
143 	writeb(0, &regs->CAMADDR);
144 
145 	/* Select mar */
146 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
147 }
148 
149 static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
150 {
151 	int i;
152 	/* Select CAM mask */
153 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
154 
155 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
156 
157 	for (i = 0; i < 8; i++)
158 		writeb(*mask++, &(regs->MARCAM[i]));
159 
160 	/* disable CAMEN */
161 	writeb(0, &regs->CAMADDR);
162 
163 	/* Select mar */
164 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
165 }
166 
167 /**
168  *	mac_set_cam	-	set CAM data
169  *	@regs: register block of this velocity
170  *	@idx: Cam index
171  *	@addr: 2 or 6 bytes of CAM data
172  *
173  *	Load an address or vlan tag into a CAM
174  */
175 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
176 {
177 	int i;
178 
179 	/* Select CAM mask */
180 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
181 
182 	idx &= (64 - 1);
183 
184 	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
185 
186 	for (i = 0; i < 6; i++)
187 		writeb(*addr++, &(regs->MARCAM[i]));
188 
189 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
190 
191 	udelay(10);
192 
193 	writeb(0, &regs->CAMADDR);
194 
195 	/* Select mar */
196 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
197 }
198 
199 static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
200 			     const u8 *addr)
201 {
202 
203 	/* Select CAM mask */
204 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
205 
206 	idx &= (64 - 1);
207 
208 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
209 	writew(*((u16 *) addr), &regs->MARCAM[0]);
210 
211 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
212 
213 	udelay(10);
214 
215 	writeb(0, &regs->CAMADDR);
216 
217 	/* Select mar */
218 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
219 }
220 
221 
222 /**
223  *	mac_wol_reset	-	reset WOL after exiting low power
224  *	@regs: register block of this velocity
225  *
226  *	Called after we drop out of wake on lan mode in order to
 *	reset the Wake on lan features. This function doesn't restore
 *	the rest of the chip state affected by sleep/wakeup.
229  */
230 static void mac_wol_reset(struct mac_regs __iomem *regs)
231 {
232 
233 	/* Turn off SWPTAG right after leaving power mode */
234 	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
235 	/* clear sticky bits */
236 	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
237 
238 	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
239 	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
240 	/* disable force PME-enable */
241 	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
242 	/* disable power-event config bit */
243 	writew(0xFFFF, &regs->WOLCRClr);
244 	/* clear power status */
245 	writew(0xFFFF, &regs->WOLSRClr);
246 }
247 
248 static const struct ethtool_ops velocity_ethtool_ops;
249 
250 /*
251     Define module options
252 */
253 
254 MODULE_AUTHOR("VIA Networking Technologies, Inc.");
255 MODULE_LICENSE("GPL");
256 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
257 
258 #define VELOCITY_PARAM(N, D) \
259 	static int N[MAX_UNITS] = OPTION_DEFAULT;\
260 	module_param_array(N, int, NULL, 0); \
261 	MODULE_PARM_DESC(N, D);
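
/*
 * For illustration, VELOCITY_PARAM(RxDescriptors, "Number of receive
 * descriptors") below expands to roughly:
 *
 *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(RxDescriptors, int, NULL, 0);
 *	MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * i.e. one module parameter array per option, one entry per adapter.
 */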
262 
263 #define RX_DESC_MIN     64
264 #define RX_DESC_MAX     255
265 #define RX_DESC_DEF     64
266 VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
267 
268 #define TX_DESC_MIN     16
269 #define TX_DESC_MAX     256
270 #define TX_DESC_DEF     64
271 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
272 
273 #define RX_THRESH_MIN   0
274 #define RX_THRESH_MAX   3
275 #define RX_THRESH_DEF   0
276 /* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
281 */
282 VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
283 
284 #define DMA_LENGTH_MIN  0
285 #define DMA_LENGTH_MAX  7
286 #define DMA_LENGTH_DEF  6
287 
288 /* DMA_length[] is used for controlling the DMA length
289    0: 8 DWORDs
290    1: 16 DWORDs
291    2: 32 DWORDs
292    3: 64 DWORDs
293    4: 128 DWORDs
294    5: 256 DWORDs
   6: SF(flush till empty)
   7: SF(flush till empty)
297 */
298 VELOCITY_PARAM(DMA_length, "DMA length");
299 
300 #define IP_ALIG_DEF     0
/* IP_byte_align[] is used to control IP header DWORD alignment
   0: indicates the IP header won't be DWORD aligned. (Default)
   1: indicates the IP header will be DWORD aligned.
      In some environments the IP header must be DWORD aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
307 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
308 
309 #define FLOW_CNTL_DEF   1
310 #define FLOW_CNTL_MIN   1
311 #define FLOW_CNTL_MAX   5
312 
313 /* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use Hardware default value in ANAR.
315    2: enable TX flow control.
316    3: enable RX flow control.
317    4: enable RX/TX flow control.
318    5: disable
319 */
320 VELOCITY_PARAM(flow_control, "Enable flow control ability");
321 
322 #define MED_LNK_DEF 0
323 #define MED_LNK_MIN 0
324 #define MED_LNK_MAX 5
325 /* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode
   5: indicates 1000Mbps full duplex mode
332 
333    Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
336 */
337 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
338 
339 #define WOL_OPT_DEF     0
340 #define WOL_OPT_MIN     0
341 #define WOL_OPT_MAX     7
342 /* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed to enable more than one option, e.g. 6 wakes
   on both arp and unicast packets.
348 */
349 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
350 
351 static int rx_copybreak = 200;
352 module_param(rx_copybreak, int, 0644);
353 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
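
/*
 * Usage example (hypothetical values): loading the driver with
 *
 *	modprobe via-velocity speed_duplex=2 wol_opts=1 rx_copybreak=128
 *
 * forces 100Mbps full duplex on the first adapter, selects wake up on
 * link status change and sets the copy-only-tiny-frames threshold to
 * 128 bytes. The array options take a comma separated list, one value
 * per adapter, up to MAX_UNITS entries.
 */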
354 
355 /*
356  *	Internal board variants. At the moment we have only one
357  */
358 static struct velocity_info_tbl chip_info_table[] = {
359 	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
360 	{ }
361 };
362 
363 /*
364  *	Describe the PCI device identifiers that we support in this
365  *	device driver. Used for hotplug autoloading.
366  */
367 
368 static const struct pci_device_id velocity_pci_id_table[] = {
369 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
370 	{ }
371 };
372 
373 MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
374 
/*
376  *	Describe the OF device identifiers that we support in this
377  *	device driver. Used for devicetree nodes.
378  */
379 static const struct of_device_id velocity_of_ids[] = {
380 	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
381 	{ /* Sentinel */ },
382 };
383 MODULE_DEVICE_TABLE(of, velocity_of_ids);
384 
385 /**
386  *	get_chip_name	- 	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
391  */
392 static const char *get_chip_name(enum chip_type chip_id)
393 {
394 	int i;
395 	for (i = 0; chip_info_table[i].name != NULL; i++)
396 		if (chip_info_table[i].chip_id == chip_id)
397 			break;
398 	return chip_info_table[i].name;
399 }
400 
401 /**
402  *	velocity_set_int_opt	-	parser for integer options
403  *	@opt: pointer to option value
404  *	@val: value the user requested (or -1 for default)
405  *	@min: lowest value allowed
406  *	@max: highest value allowed
407  *	@def: default value
408  *	@name: property name
409  *
410  *	Set an integer property in the module options. This function does
411  *	all the verification and checking as well as reporting so that
412  *	we don't duplicate code for each option.
413  */
414 static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
415 				 char *name)
416 {
417 	if (val == -1)
418 		*opt = def;
419 	else if (val < min || val > max) {
420 		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
421 			  name, min, max);
422 		*opt = def;
423 	} else {
424 		pr_info("set value of parameter %s to %d\n", name, val);
425 		*opt = val;
426 	}
427 }
428 
429 /**
430  *	velocity_set_bool_opt	-	parser for boolean options
431  *	@opt: pointer to option value
432  *	@val: value the user requested (or -1 for default)
433  *	@def: default value (yes/no)
434  *	@flag: numeric value to set for true.
435  *	@name: property name
436  *
437  *	Set a boolean property in the module options. This function does
438  *	all the verification and checking as well as reporting so that
439  *	we don't duplicate code for each option.
440  */
441 static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
442 				  char *name)
443 {
444 	(*opt) &= (~flag);
445 	if (val == -1)
446 		*opt |= (def ? flag : 0);
447 	else if (val < 0 || val > 1) {
448 		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
449 			  name, 0, 1);
450 		*opt |= (def ? flag : 0);
451 	} else {
452 		pr_info("set parameter %s to %s\n",
453 			name, val ? "TRUE" : "FALSE");
454 		*opt |= (val ? flag : 0);
455 	}
456 }
457 
458 /**
459  *	velocity_get_options	-	set options on device
460  *	@opts: option structure for the device
461  *	@index: index of option to use in module options array
462  *
463  *	Turn the module and command options into a single structure
464  *	for the current device
465  */
466 static void velocity_get_options(struct velocity_opt *opts, int index)
467 {
468 
469 	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
470 			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
471 			     "rx_thresh");
472 	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
473 			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
474 			     "DMA_length");
475 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
476 			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
477 			     "RxDescriptors");
478 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
479 			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
480 			     "TxDescriptors");
481 
482 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
483 			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
484 			     "flow_control");
485 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
486 			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
487 			      "IP_byte_align");
488 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
489 			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
490 			     "Media link mode");
491 	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
492 			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
493 			     "Wake On Lan options");
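	/*
	 * Receive descriptors are handed to the NIC in blocks of four
	 * (see velocity_give_many_rx_descs), so round the ring size down
	 * to a multiple of 4, e.g. a request for 255 becomes 252.
	 */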
494 	opts->numrx = (opts->numrx & ~3);
495 }
496 
497 /**
498  *	velocity_init_cam_filter	-	initialise CAM
499  *	@vptr: velocity to program
500  *
501  *	Initialize the content addressable memory used for filters. Load
502  *	appropriately according to the presence of VLAN
503  */
504 static void velocity_init_cam_filter(struct velocity_info *vptr)
505 {
506 	struct mac_regs __iomem *regs = vptr->mac_regs;
507 	unsigned int vid, i = 0;
508 
509 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
510 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
511 	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
512 
513 	/* Disable all CAMs */
514 	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
515 	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
516 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
517 	mac_set_cam_mask(regs, vptr->mCAMmask);
518 
519 	/* Enable VCAMs */
520 	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
521 		mac_set_vlan_cam(regs, i, (u8 *) &vid);
522 		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
523 		if (++i >= VCAM_SIZE)
524 			break;
525 	}
526 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
527 }
528 
529 static int velocity_vlan_rx_add_vid(struct net_device *dev,
530 				    __be16 proto, u16 vid)
531 {
532 	struct velocity_info *vptr = netdev_priv(dev);
533 
534 	spin_lock_irq(&vptr->lock);
535 	set_bit(vid, vptr->active_vlans);
536 	velocity_init_cam_filter(vptr);
537 	spin_unlock_irq(&vptr->lock);
538 	return 0;
539 }
540 
541 static int velocity_vlan_rx_kill_vid(struct net_device *dev,
542 				     __be16 proto, u16 vid)
543 {
544 	struct velocity_info *vptr = netdev_priv(dev);
545 
546 	spin_lock_irq(&vptr->lock);
547 	clear_bit(vid, vptr->active_vlans);
548 	velocity_init_cam_filter(vptr);
549 	spin_unlock_irq(&vptr->lock);
550 	return 0;
551 }
552 
553 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
554 {
555 	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
556 }
557 
558 /**
559  *	velocity_rx_reset	-	handle a receive reset
560  *	@vptr: velocity we are resetting
561  *
562  *	Reset the ownership and status for the receive ring side.
 *	Hand the entire receive queue back to the NIC.
564  */
565 static void velocity_rx_reset(struct velocity_info *vptr)
566 {
567 
568 	struct mac_regs __iomem *regs = vptr->mac_regs;
569 	int i;
570 
571 	velocity_init_rx_ring_indexes(vptr);
572 
573 	/*
574 	 *	Init state, all RD entries belong to the NIC
575 	 */
576 	for (i = 0; i < vptr->options.numrx; ++i)
577 		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
578 
579 	writew(vptr->options.numrx, &regs->RBRDU);
580 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
581 	writew(0, &regs->RDIdx);
582 	writew(vptr->options.numrx - 1, &regs->RDCSize);
583 }
584 
585 /**
586  *	velocity_get_opt_media_mode	-	get media selection
587  *	@vptr: velocity adapter
588  *
589  *	Get the media mode stored in EEPROM or module options and load
590  *	mii_status accordingly. The requested link state information
591  *	is also returned.
592  */
593 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
594 {
595 	u32 status = 0;
596 
597 	switch (vptr->options.spd_dpx) {
598 	case SPD_DPX_AUTO:
599 		status = VELOCITY_AUTONEG_ENABLE;
600 		break;
601 	case SPD_DPX_100_FULL:
602 		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
603 		break;
604 	case SPD_DPX_10_FULL:
605 		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
606 		break;
607 	case SPD_DPX_100_HALF:
608 		status = VELOCITY_SPEED_100;
609 		break;
610 	case SPD_DPX_10_HALF:
611 		status = VELOCITY_SPEED_10;
612 		break;
613 	case SPD_DPX_1000_FULL:
614 		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
615 		break;
616 	}
617 	vptr->mii_status = status;
618 	return status;
619 }
620 
621 /**
622  *	safe_disable_mii_autopoll	-	autopoll off
623  *	@regs: velocity registers
624  *
625  *	Turn off the autopoll and wait for it to disable on the chip
626  */
627 static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
628 {
629 	u16 ww;
630 
631 	/*  turn off MAUTO */
632 	writeb(0, &regs->MIICR);
633 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
634 		udelay(1);
635 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
636 			break;
637 	}
638 }
639 
640 /**
641  *	enable_mii_autopoll	-	turn on autopolling
642  *	@regs: velocity registers
643  *
644  *	Enable the MII link status autopoll feature on the Velocity
645  *	hardware. Wait for it to enable.
646  */
647 static void enable_mii_autopoll(struct mac_regs __iomem *regs)
648 {
649 	int ii;
650 
651 	writeb(0, &(regs->MIICR));
652 	writeb(MIIADR_SWMPL, &regs->MIIADR);
653 
654 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
655 		udelay(1);
656 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
657 			break;
658 	}
659 
660 	writeb(MIICR_MAUTO, &regs->MIICR);
661 
662 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
663 		udelay(1);
664 		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
665 			break;
666 	}
667 
668 }
669 
670 /**
671  *	velocity_mii_read	-	read MII data
672  *	@regs: velocity registers
673  *	@index: MII register index
674  *	@data: buffer for received data
675  *
676  *	Perform a single read of an MII 16bit register. Returns zero
677  *	on success or -ETIMEDOUT if the PHY did not respond.
678  */
679 static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
680 {
681 	u16 ww;
682 
683 	/*
684 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
685 	 */
686 	safe_disable_mii_autopoll(regs);
687 
688 	writeb(index, &regs->MIIADR);
689 
690 	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
691 
692 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
693 		if (!(readb(&regs->MIICR) & MIICR_RCMD))
694 			break;
695 	}
696 
697 	*data = readw(&regs->MIIDATA);
698 
699 	enable_mii_autopoll(regs);
700 	if (ww == W_MAX_TIMEOUT)
701 		return -ETIMEDOUT;
702 	return 0;
703 }
704 
705 /**
706  *	mii_check_media_mode	-	check media state
707  *	@regs: velocity registers
708  *
709  *	Check the current MII status and determine the link status
710  *	accordingly
711  */
712 static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
713 {
714 	u32 status = 0;
715 	u16 ANAR;
716 
717 	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
718 		status |= VELOCITY_LINK_FAIL;
719 
720 	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
721 		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
722 	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
723 		status |= (VELOCITY_SPEED_1000);
724 	else {
725 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
726 		if (ANAR & ADVERTISE_100FULL)
727 			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
728 		else if (ANAR & ADVERTISE_100HALF)
729 			status |= VELOCITY_SPEED_100;
730 		else if (ANAR & ADVERTISE_10FULL)
731 			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
732 		else
733 			status |= (VELOCITY_SPEED_10);
734 	}
735 
736 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
737 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
738 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
739 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
740 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
741 				status |= VELOCITY_AUTONEG_ENABLE;
742 		}
743 	}
744 
745 	return status;
746 }
747 
748 /**
749  *	velocity_mii_write	-	write MII data
750  *	@regs: velocity registers
 *	@mii_addr: MII register index
752  *	@data: 16bit data for the MII register
753  *
754  *	Perform a single write to an MII 16bit register. Returns zero
755  *	on success or -ETIMEDOUT if the PHY did not respond.
756  */
757 static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
758 {
759 	u16 ww;
760 
761 	/*
762 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
763 	 */
764 	safe_disable_mii_autopoll(regs);
765 
766 	/* MII reg offset */
767 	writeb(mii_addr, &regs->MIIADR);
768 	/* set MII data */
769 	writew(data, &regs->MIIDATA);
770 
771 	/* turn on MIICR_WCMD */
772 	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
773 
774 	/* W_MAX_TIMEOUT is the timeout period */
775 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
776 		udelay(5);
777 		if (!(readb(&regs->MIICR) & MIICR_WCMD))
778 			break;
779 	}
780 	enable_mii_autopoll(regs);
781 
782 	if (ww == W_MAX_TIMEOUT)
783 		return -ETIMEDOUT;
784 	return 0;
785 }
786 
787 /**
788  *	set_mii_flow_control	-	flow control setup
789  *	@vptr: velocity interface
790  *
791  *	Set up the flow control on this interface according to
792  *	the supplied user/eeprom options.
793  */
794 static void set_mii_flow_control(struct velocity_info *vptr)
795 {
796 	/*Enable or Disable PAUSE in ANAR */
797 	switch (vptr->options.flow_cntl) {
798 	case FLOW_CNTL_TX:
799 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
800 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
801 		break;
802 
803 	case FLOW_CNTL_RX:
804 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
806 		break;
807 
808 	case FLOW_CNTL_TX_RX:
809 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
811 		break;
812 
813 	case FLOW_CNTL_DISABLE:
814 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
816 		break;
817 	default:
818 		break;
819 	}
820 }
821 
822 /**
823  *	mii_set_auto_on		-	autonegotiate on
824  *	@vptr: velocity
825  *
 *	Enable autonegotiation on this interface
827  */
828 static void mii_set_auto_on(struct velocity_info *vptr)
829 {
830 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
831 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
832 	else
833 		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
834 }
835 
836 static u32 check_connection_type(struct mac_regs __iomem *regs)
837 {
838 	u32 status = 0;
839 	u8 PHYSR0;
840 	u16 ANAR;
841 	PHYSR0 = readb(&regs->PHYSR0);
842 
843 	/*
844 	   if (!(PHYSR0 & PHYSR0_LINKGD))
845 	   status|=VELOCITY_LINK_FAIL;
846 	 */
847 
848 	if (PHYSR0 & PHYSR0_FDPX)
849 		status |= VELOCITY_DUPLEX_FULL;
850 
851 	if (PHYSR0 & PHYSR0_SPDG)
852 		status |= VELOCITY_SPEED_1000;
853 	else if (PHYSR0 & PHYSR0_SPD10)
854 		status |= VELOCITY_SPEED_10;
855 	else
856 		status |= VELOCITY_SPEED_100;
857 
858 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
859 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
860 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
861 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
862 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
863 				status |= VELOCITY_AUTONEG_ENABLE;
864 		}
865 	}
866 
867 	return status;
868 }
869 
870 /**
871  *	velocity_set_media_mode		-	set media mode
 *	@vptr: velocity adapter
 *	@mii_status: old MII link state
 *
 *	Check the media link state and configure the flow control on the
 *	PHY and the velocity hardware accordingly. In particular
876  *	we need to set up CD polling and frame bursting.
877  */
878 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
879 {
880 	u32 curr_status;
881 	struct mac_regs __iomem *regs = vptr->mac_regs;
882 
883 	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
884 	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
885 
886 	/* Set mii link status */
887 	set_mii_flow_control(vptr);
888 
889 	/*
890 	   Check if new status is consistent with current status
891 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
892 	       (mii_status==curr_status)) {
893 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
894 	   vptr->mii_status=check_connection_type(vptr->mac_regs);
895 	   netdev_info(vptr->netdev, "Velocity link no change\n");
896 	   return 0;
897 	   }
898 	 */
899 
900 	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
901 		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
902 
903 	/*
904 	 *	If connection type is AUTO
905 	 */
906 	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
907 		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
908 		/* clear force MAC mode bit */
909 		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
910 		/* set duplex mode of MAC according to duplex mode of MII */
911 		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
912 		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
913 		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
914 
915 		/* enable AUTO-NEGO mode */
916 		mii_set_auto_on(vptr);
917 	} else {
918 		u16 CTRL1000;
919 		u16 ANAR;
920 		u8 CHIPGCR;
921 
922 		/*
923 		 * 1. if it's 3119, disable frame bursting in halfduplex mode
924 		 *    and enable it in fullduplex mode
925 		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
926 		 * 3. only enable CD heart beat counter in 10HD mode
927 		 */
928 
929 		/* set force MAC mode bit */
930 		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
931 
932 		CHIPGCR = readb(&regs->CHIPGCR);
933 
934 		if (mii_status & VELOCITY_SPEED_1000)
935 			CHIPGCR |= CHIPGCR_FCGMII;
936 		else
937 			CHIPGCR &= ~CHIPGCR_FCGMII;
938 
939 		if (mii_status & VELOCITY_DUPLEX_FULL) {
940 			CHIPGCR |= CHIPGCR_FCFDX;
941 			writeb(CHIPGCR, &regs->CHIPGCR);
942 			netdev_info(vptr->netdev,
943 				    "set Velocity to forced full mode\n");
944 			if (vptr->rev_id < REV_ID_VT3216_A0)
945 				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
946 		} else {
947 			CHIPGCR &= ~CHIPGCR_FCFDX;
948 			netdev_info(vptr->netdev,
949 				    "set Velocity to forced half mode\n");
950 			writeb(CHIPGCR, &regs->CHIPGCR);
951 			if (vptr->rev_id < REV_ID_VT3216_A0)
952 				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
953 		}
954 
955 		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
956 		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
957 		if ((mii_status & VELOCITY_SPEED_1000) &&
958 		    (mii_status & VELOCITY_DUPLEX_FULL)) {
959 			CTRL1000 |= ADVERTISE_1000FULL;
960 		}
961 		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
962 
963 		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
964 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
965 		else
966 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
967 
968 		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
969 		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
970 		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
971 		if (mii_status & VELOCITY_SPEED_100) {
972 			if (mii_status & VELOCITY_DUPLEX_FULL)
973 				ANAR |= ADVERTISE_100FULL;
974 			else
975 				ANAR |= ADVERTISE_100HALF;
976 		} else if (mii_status & VELOCITY_SPEED_10) {
977 			if (mii_status & VELOCITY_DUPLEX_FULL)
978 				ANAR |= ADVERTISE_10FULL;
979 			else
980 				ANAR |= ADVERTISE_10HALF;
981 		}
982 		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
983 		/* enable AUTO-NEGO mode */
984 		mii_set_auto_on(vptr);
985 		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
986 	}
987 	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
988 	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
989 	return VELOCITY_LINK_CHANGE;
990 }
991 
992 /**
993  *	velocity_print_link_status	-	link status reporting
994  *	@vptr: velocity to report on
995  *
996  *	Turn the link status of the velocity card into a kernel log
997  *	description of the new link state, detailing speed and duplex
998  *	status
999  */
1000 static void velocity_print_link_status(struct velocity_info *vptr)
1001 {
1002 	const char *link;
1003 	const char *speed;
1004 	const char *duplex;
1005 
1006 	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1007 		netdev_notice(vptr->netdev, "failed to detect cable link\n");
1008 		return;
1009 	}
1010 
1011 	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1012 		link = "auto-negotiation";
1013 
1014 		if (vptr->mii_status & VELOCITY_SPEED_1000)
1015 			speed = "1000";
1016 		else if (vptr->mii_status & VELOCITY_SPEED_100)
1017 			speed = "100";
1018 		else
1019 			speed = "10";
1020 
1021 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1022 			duplex = "full";
1023 		else
1024 			duplex = "half";
1025 	} else {
1026 		link = "forced";
1027 
1028 		switch (vptr->options.spd_dpx) {
1029 		case SPD_DPX_1000_FULL:
1030 			speed = "1000";
1031 			duplex = "full";
1032 			break;
1033 		case SPD_DPX_100_HALF:
1034 			speed = "100";
1035 			duplex = "half";
1036 			break;
1037 		case SPD_DPX_100_FULL:
1038 			speed = "100";
1039 			duplex = "full";
1040 			break;
1041 		case SPD_DPX_10_HALF:
1042 			speed = "10";
1043 			duplex = "half";
1044 			break;
1045 		case SPD_DPX_10_FULL:
1046 			speed = "10";
1047 			duplex = "full";
1048 			break;
1049 		default:
1050 			speed = "unknown";
1051 			duplex = "unknown";
1052 			break;
1053 		}
1054 	}
1055 	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
1056 		      link, speed, duplex);
1057 }
1058 
1059 /**
1060  *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
1062  *
1063  *	Set up flow control according to the flow control options
1064  *	determined by the eeprom/configuration.
1065  */
1066 static void enable_flow_control_ability(struct velocity_info *vptr)
1067 {
1068 
1069 	struct mac_regs __iomem *regs = vptr->mac_regs;
1070 
1071 	switch (vptr->options.flow_cntl) {
1072 
1073 	case FLOW_CNTL_DEFAULT:
1074 		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1075 			writel(CR0_FDXRFCEN, &regs->CR0Set);
1076 		else
1077 			writel(CR0_FDXRFCEN, &regs->CR0Clr);
1078 
1079 		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1080 			writel(CR0_FDXTFCEN, &regs->CR0Set);
1081 		else
1082 			writel(CR0_FDXTFCEN, &regs->CR0Clr);
1083 		break;
1084 
1085 	case FLOW_CNTL_TX:
1086 		writel(CR0_FDXTFCEN, &regs->CR0Set);
1087 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1088 		break;
1089 
1090 	case FLOW_CNTL_RX:
1091 		writel(CR0_FDXRFCEN, &regs->CR0Set);
1092 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1093 		break;
1094 
1095 	case FLOW_CNTL_TX_RX:
1096 		writel(CR0_FDXTFCEN, &regs->CR0Set);
1097 		writel(CR0_FDXRFCEN, &regs->CR0Set);
1098 		break;
1099 
1100 	case FLOW_CNTL_DISABLE:
1101 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1102 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1103 		break;
1104 
1105 	default:
1106 		break;
1107 	}
1108 
1109 }
1110 
1111 /**
1112  *	velocity_soft_reset	-	soft reset
1113  *	@vptr: velocity to reset
1114  *
1115  *	Kick off a soft reset of the velocity adapter and then poll
1116  *	until the reset sequence has completed before returning.
1117  */
1118 static int velocity_soft_reset(struct velocity_info *vptr)
1119 {
1120 	struct mac_regs __iomem *regs = vptr->mac_regs;
1121 	int i = 0;
1122 
1123 	writel(CR0_SFRST, &regs->CR0Set);
1124 
1125 	for (i = 0; i < W_MAX_TIMEOUT; i++) {
1126 		udelay(5);
1127 		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1128 			break;
1129 	}
1130 
1131 	if (i == W_MAX_TIMEOUT) {
1132 		writel(CR0_FORSRST, &regs->CR0Set);
1133 		/* FIXME: PCI POSTING */
1134 		/* delay 2ms */
1135 		mdelay(2);
1136 	}
1137 	return 0;
1138 }
1139 
1140 /**
1141  *	velocity_set_multi	-	filter list change callback
1142  *	@dev: network device
1143  *
1144  *	Called by the network layer when the filter lists need to change
1145  *	for a velocity adapter. Reload the CAMs with the new address
1146  *	filter ruleset.
1147  */
1148 static void velocity_set_multi(struct net_device *dev)
1149 {
1150 	struct velocity_info *vptr = netdev_priv(dev);
1151 	struct mac_regs __iomem *regs = vptr->mac_regs;
1152 	u8 rx_mode;
1153 	int i;
1154 	struct netdev_hw_addr *ha;
1155 
1156 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1157 		writel(0xffffffff, &regs->MARCAM[0]);
1158 		writel(0xffffffff, &regs->MARCAM[4]);
1159 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1160 	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1161 		   (dev->flags & IFF_ALLMULTI)) {
1162 		writel(0xffffffff, &regs->MARCAM[0]);
1163 		writel(0xffffffff, &regs->MARCAM[4]);
1164 		rx_mode = (RCR_AM | RCR_AB);
1165 	} else {
1166 		int offset = MCAM_SIZE - vptr->multicast_limit;
1167 		mac_get_cam_mask(regs, vptr->mCAMmask);
1168 
1169 		i = 0;
1170 		netdev_for_each_mc_addr(ha, dev) {
1171 			mac_set_cam(regs, i + offset, ha->addr);
1172 			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1173 			i++;
1174 		}
1175 
1176 		mac_set_cam_mask(regs, vptr->mCAMmask);
1177 		rx_mode = RCR_AM | RCR_AB | RCR_AP;
1178 	}
1179 	if (dev->mtu > 1500)
1180 		rx_mode |= RCR_AL;
1181 
1182 	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1183 
1184 }
1185 
1186 /*
 * MII access and media link mode setting functions
1188  */
1189 
1190 /**
1191  *	mii_init	-	set up MII
1192  *	@vptr: velocity adapter
 *	@mii_status: link status
1194  *
1195  *	Set up the PHY for the current link state.
1196  */
1197 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1198 {
1199 	u16 BMCR;
1200 
1201 	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1202 	case PHYID_ICPLUS_IP101A:
1203 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1204 						MII_ADVERTISE, vptr->mac_regs);
1205 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1206 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1207 								vptr->mac_regs);
1208 		else
1209 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1210 								vptr->mac_regs);
1211 		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1212 		break;
1213 	case PHYID_CICADA_CS8201:
1214 		/*
1215 		 *	Reset to hardware default
1216 		 */
1217 		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1218 		/*
		 *	Turn on the ECHODIS bit in NWay-forced full mode and
		 *	turn it off in NWay-forced half mode to work around the
		 *	NWay-forced vs. legacy-forced issue.
1222 		 */
1223 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1224 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1225 		else
1226 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1227 		/*
1228 		 *	Turn on Link/Activity LED enable bit for CIS8201
1229 		 */
1230 		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1231 		break;
1232 	case PHYID_VT3216_32BIT:
1233 	case PHYID_VT3216_64BIT:
1234 		/*
1235 		 *	Reset to hardware default
1236 		 */
1237 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1238 		/*
		 *	Turn on the ECHODIS bit in NWay-forced full mode and
		 *	turn it off in NWay-forced half mode to work around the
		 *	NWay-forced vs. legacy-forced issue
1242 		 */
1243 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1244 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1245 		else
1246 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1247 		break;
1248 
1249 	case PHYID_MARVELL_1000:
1250 	case PHYID_MARVELL_1000S:
1251 		/*
1252 		 *	Assert CRS on Transmit
1253 		 */
1254 		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1255 		/*
1256 		 *	Reset to hardware default
1257 		 */
1258 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1259 		break;
1260 	default:
1261 		;
1262 	}
1263 	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1264 	if (BMCR & BMCR_ISOLATE) {
1265 		BMCR &= ~BMCR_ISOLATE;
1266 		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1267 	}
1268 }
1269 
1270 /**
1271  * setup_queue_timers	-	Setup interrupt timers
 * @vptr: velocity adapter
 *
1273  * Setup interrupt frequency during suppression (timeout if the frame
1274  * count isn't filled).
1275  */
1276 static void setup_queue_timers(struct velocity_info *vptr)
1277 {
1278 	/* Only for newer revisions */
1279 	if (vptr->rev_id >= REV_ID_VT3216_A0) {
1280 		u8 txqueue_timer = 0;
1281 		u8 rxqueue_timer = 0;
1282 
1283 		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1284 				VELOCITY_SPEED_100)) {
1285 			txqueue_timer = vptr->options.txqueue_timer;
1286 			rxqueue_timer = vptr->options.rxqueue_timer;
1287 		}
1288 
1289 		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1290 		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1291 	}
1292 }
1293 
1294 /**
1295  * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
1300  * This function turns on that feature.
1301  */
1302 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1303 {
1304 	struct mac_regs __iomem *regs = vptr->mac_regs;
1305 	u16 tx_intsup = vptr->options.tx_intsup;
1306 	u16 rx_intsup = vptr->options.rx_intsup;
1307 
1308 	/* Setup default interrupt mask (will be changed below) */
1309 	vptr->int_mask = INT_MASK_DEF;
1310 
1311 	/* Set Tx Interrupt Suppression Threshold */
1312 	writeb(CAMCR_PS0, &regs->CAMCR);
1313 	if (tx_intsup != 0) {
1314 		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1315 				ISR_PTX2I | ISR_PTX3I);
1316 		writew(tx_intsup, &regs->ISRCTL);
1317 	} else
1318 		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1319 
1320 	/* Set Rx Interrupt Suppression Threshold */
1321 	writeb(CAMCR_PS1, &regs->CAMCR);
1322 	if (rx_intsup != 0) {
1323 		vptr->int_mask &= ~ISR_PRXI;
1324 		writew(rx_intsup, &regs->ISRCTL);
1325 	} else
1326 		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1327 
1328 	/* Select page to interrupt hold timer */
1329 	writeb(0, &regs->CAMCR);
1330 }
1331 
1332 /**
1333  *	velocity_init_registers	-	initialise MAC registers
1334  *	@vptr: velocity to init
1335  *	@type: type of initialisation (hot or cold)
1336  *
1337  *	Initialise the MAC on a reset or on first set up on the
1338  *	hardware.
1339  */
1340 static void velocity_init_registers(struct velocity_info *vptr,
1341 				    enum velocity_init_type type)
1342 {
1343 	struct mac_regs __iomem *regs = vptr->mac_regs;
1344 	struct net_device *netdev = vptr->netdev;
1345 	int i, mii_status;
1346 
1347 	mac_wol_reset(regs);
1348 
1349 	switch (type) {
1350 	case VELOCITY_INIT_RESET:
1351 	case VELOCITY_INIT_WOL:
1352 
1353 		netif_stop_queue(netdev);
1354 
1355 		/*
		 *	Reset RX so that the RX descriptor pointer stays on a
		 *	multiple-of-4 boundary
1357 		 */
1358 		velocity_rx_reset(vptr);
1359 		mac_rx_queue_run(regs);
1360 		mac_rx_queue_wake(regs);
1361 
1362 		mii_status = velocity_get_opt_media_mode(vptr);
1363 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1364 			velocity_print_link_status(vptr);
1365 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1366 				netif_wake_queue(netdev);
1367 		}
1368 
1369 		enable_flow_control_ability(vptr);
1370 
1371 		mac_clear_isr(regs);
1372 		writel(CR0_STOP, &regs->CR0Clr);
1373 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1374 							&regs->CR0Set);
1375 
1376 		break;
1377 
1378 	case VELOCITY_INIT_COLD:
1379 	default:
1380 		/*
1381 		 *	Do reset
1382 		 */
1383 		velocity_soft_reset(vptr);
1384 		mdelay(5);
1385 
1386 		if (!vptr->no_eeprom) {
1387 			mac_eeprom_reload(regs);
1388 			for (i = 0; i < 6; i++)
1389 				writeb(netdev->dev_addr[i], regs->PAR + i);
1390 		}
1391 
1392 		/*
1393 		 *	clear Pre_ACPI bit.
1394 		 */
1395 		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1396 		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1397 		mac_set_dma_length(regs, vptr->options.DMA_length);
1398 
1399 		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1400 		/*
1401 		 *	Back off algorithm use original IEEE standard
1402 		 */
1403 		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1404 
1405 		/*
1406 		 *	Init CAM filter
1407 		 */
1408 		velocity_init_cam_filter(vptr);
1409 
1410 		/*
1411 		 *	Set packet filter: Receive directed and broadcast address
1412 		 */
1413 		velocity_set_multi(netdev);
1414 
1415 		/*
1416 		 *	Enable MII auto-polling
1417 		 */
1418 		enable_mii_autopoll(regs);
1419 
1420 		setup_adaptive_interrupts(vptr);
1421 
1422 		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1423 		writew(vptr->options.numrx - 1, &regs->RDCSize);
1424 		mac_rx_queue_run(regs);
1425 		mac_rx_queue_wake(regs);
1426 
1427 		writew(vptr->options.numtx - 1, &regs->TDCSize);
1428 
1429 		for (i = 0; i < vptr->tx.numq; i++) {
1430 			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1431 			mac_tx_queue_run(regs, i);
1432 		}
1433 
1434 		init_flow_control_register(vptr);
1435 
1436 		writel(CR0_STOP, &regs->CR0Clr);
1437 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1438 
1439 		mii_status = velocity_get_opt_media_mode(vptr);
1440 		netif_stop_queue(netdev);
1441 
1442 		mii_init(vptr, mii_status);
1443 
1444 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1445 			velocity_print_link_status(vptr);
1446 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1447 				netif_wake_queue(netdev);
1448 		}
1449 
1450 		enable_flow_control_ability(vptr);
1451 		mac_hw_mibs_init(regs);
1452 		mac_write_int_mask(vptr->int_mask, regs);
1453 		mac_clear_isr(regs);
1454 
1455 	}
1456 }
1457 
1458 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1459 {
1460 	struct mac_regs __iomem *regs = vptr->mac_regs;
1461 	int avail, dirty, unusable;
1462 
1463 	/*
	 * The number of RDs returned to the NIC must be a multiple of 4
	 * per the hardware spec (programming guide rev 1.20, p.13)
1466 	 */
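	/*
	 * Worked example: with rx.filled == 10, the loop below flips 8
	 * descriptors (10 & 0xfffc) back to OWNED_BY_NIC and credits RBRDU
	 * with 8, while the remaining 2 stay counted in rx.filled until
	 * further refills complete another group of four.
	 */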
1467 	if (vptr->rx.filled < 4)
1468 		return;
1469 
1470 	wmb();
1471 
1472 	unusable = vptr->rx.filled & 0x0003;
1473 	dirty = vptr->rx.dirty - unusable;
1474 	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1475 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1476 		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1477 	}
1478 
1479 	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1480 	vptr->rx.filled = unusable;
1481 }
1482 
1483 /**
1484  *	velocity_init_dma_rings	-	set up DMA rings
1485  *	@vptr: Velocity to set up
1486  *
1487  *	Allocate PCI mapped DMA rings for the receive and transmit layer
1488  *	to use.
1489  */
1490 static int velocity_init_dma_rings(struct velocity_info *vptr)
1491 {
1492 	struct velocity_opt *opt = &vptr->options;
1493 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1494 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1495 	dma_addr_t pool_dma;
1496 	void *pool;
1497 	unsigned int i;
1498 
1499 	/*
1500 	 * Allocate all RD/TD rings a single pool.
1501 	 *
1502 	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
1503 	 * alignment
1504 	 */
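	/*
	 * The resulting layout of the single allocation is:
	 *
	 *	[ RX ring:   numrx * sizeof(struct rx_desc) ]
	 *	[ TX ring 0: numtx * sizeof(struct tx_desc) ]
	 *	[ ... one TX ring per queue, tx.numq rings in total ... ]
	 *
	 * rx.pool_dma points at the start of the pool and each
	 * tx.pool_dma[i] at its own ring, as assigned below.
	 */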
1505 	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1506 				    rx_ring_size, &pool_dma, GFP_ATOMIC);
1507 	if (!pool) {
1508 		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1509 			vptr->netdev->name);
1510 		return -ENOMEM;
1511 	}
1512 
1513 	vptr->rx.ring = pool;
1514 	vptr->rx.pool_dma = pool_dma;
1515 
1516 	pool += rx_ring_size;
1517 	pool_dma += rx_ring_size;
1518 
1519 	for (i = 0; i < vptr->tx.numq; i++) {
1520 		vptr->tx.rings[i] = pool;
1521 		vptr->tx.pool_dma[i] = pool_dma;
1522 		pool += tx_ring_size;
1523 		pool_dma += tx_ring_size;
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1530 {
1531 	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1532 }
1533 
1534 /**
1535  *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1536  *	@vptr: velocity
1537  *	@idx: ring index
1538  *
1539  *	Allocate a new full sized buffer for the reception of a frame and
1540  *	map it into PCI space for the hardware to use. The hardware
1541  *	requires *64* byte alignment of the buffer which makes life
1542  *	less fun than would be ideal.
1543  */
1544 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1545 {
1546 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1547 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1548 
1549 	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1550 	if (rd_info->skb == NULL)
1551 		return -ENOMEM;
1552 
1553 	/*
1554 	 *	Do the gymnastics to get the buffer head for data at
1555 	 *	64byte alignment.
1556 	 */
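	/*
	 * e.g. if skb->data is at an address that is 40 bytes past a
	 * 64 byte boundary, skb_reserve() below shifts it forward by
	 * 64 - 40 = 24 bytes so the buffer handed to the NIC is 64 byte
	 * aligned; the extra 64 bytes requested from netdev_alloc_skb()
	 * above leave room for this shift.
	 */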
1557 	skb_reserve(rd_info->skb,
1558 			64 - ((unsigned long) rd_info->skb->data & 63));
1559 	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1560 					vptr->rx.buf_sz, DMA_FROM_DEVICE);
1561 
1562 	/*
1563 	 *	Fill in the descriptor to match
1564 	 */
1565 
1566 	*((u32 *) & (rd->rdesc0)) = 0;
1567 	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1568 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1569 	rd->pa_high = 0;
1570 	return 0;
1571 }
1572 
1573 
1574 static int velocity_rx_refill(struct velocity_info *vptr)
1575 {
1576 	int dirty = vptr->rx.dirty, done = 0;
1577 
1578 	do {
1579 		struct rx_desc *rd = vptr->rx.ring + dirty;
1580 
1581 		/* Fine for an all zero Rx desc at init time as well */
1582 		if (rd->rdesc0.len & OWNED_BY_NIC)
1583 			break;
1584 
1585 		if (!vptr->rx.info[dirty].skb) {
1586 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1587 				break;
1588 		}
1589 		done++;
1590 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1591 	} while (dirty != vptr->rx.curr);
1592 
1593 	if (done) {
1594 		vptr->rx.dirty = dirty;
1595 		vptr->rx.filled += done;
1596 	}
1597 
1598 	return done;
1599 }
1600 
1601 /**
1602  *	velocity_free_rd_ring	-	free receive ring
1603  *	@vptr: velocity to clean up
1604  *
1605  *	Free the receive buffers for each ring slot and any
1606  *	attached socket buffers that need to go away.
1607  */
1608 static void velocity_free_rd_ring(struct velocity_info *vptr)
1609 {
1610 	int i;
1611 
1612 	if (vptr->rx.info == NULL)
1613 		return;
1614 
1615 	for (i = 0; i < vptr->options.numrx; i++) {
1616 		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1617 		struct rx_desc *rd = vptr->rx.ring + i;
1618 
1619 		memset(rd, 0, sizeof(*rd));
1620 
1621 		if (!rd_info->skb)
1622 			continue;
1623 		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1624 				 DMA_FROM_DEVICE);
1625 		rd_info->skb_dma = 0;
1626 
1627 		dev_kfree_skb(rd_info->skb);
1628 		rd_info->skb = NULL;
1629 	}
1630 
1631 	kfree(vptr->rx.info);
1632 	vptr->rx.info = NULL;
1633 }
1634 
1635 /**
1636  *	velocity_init_rd_ring	-	set up receive ring
1637  *	@vptr: velocity to configure
1638  *
1639  *	Allocate and set up the receive buffers for each ring slot and
1640  *	assign them to the network adapter.
1641  */
1642 static int velocity_init_rd_ring(struct velocity_info *vptr)
1643 {
1644 	int ret = -ENOMEM;
1645 
1646 	vptr->rx.info = kcalloc(vptr->options.numrx,
1647 				sizeof(struct velocity_rd_info), GFP_KERNEL);
1648 	if (!vptr->rx.info)
1649 		goto out;
1650 
1651 	velocity_init_rx_ring_indexes(vptr);
1652 
1653 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1654 		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1655 		velocity_free_rd_ring(vptr);
1656 		goto out;
1657 	}
1658 
1659 	ret = 0;
1660 out:
1661 	return ret;
1662 }
1663 
1664 /**
1665  *	velocity_init_td_ring	-	set up transmit ring
1666  *	@vptr:	velocity
1667  *
1668  *	Set up the transmit ring and chain the ring pointers together.
1669  *	Returns zero on success or a negative posix errno code for
1670  *	failure.
1671  */
1672 static int velocity_init_td_ring(struct velocity_info *vptr)
1673 {
1674 	int j;
1675 
1676 	/* Init the TD ring entries */
1677 	for (j = 0; j < vptr->tx.numq; j++) {
1678 
1679 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1680 					    sizeof(struct velocity_td_info),
1681 					    GFP_KERNEL);
1682 		if (!vptr->tx.infos[j])	{
1683 			while (--j >= 0)
1684 				kfree(vptr->tx.infos[j]);
1685 			return -ENOMEM;
1686 		}
1687 
1688 		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1689 	}
1690 	return 0;
1691 }
1692 
1693 /**
1694  *	velocity_free_dma_rings	-	free PCI ring pointers
1695  *	@vptr: Velocity to free from
1696  *
1697  *	Clean up the PCI ring buffers allocated to this velocity.
1698  */
1699 static void velocity_free_dma_rings(struct velocity_info *vptr)
1700 {
1701 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1702 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1703 
1704 	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1705 }
1706 
1707 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1708 {
1709 	int ret;
1710 
1711 	velocity_set_rxbufsize(vptr, mtu);
1712 
1713 	ret = velocity_init_dma_rings(vptr);
1714 	if (ret < 0)
1715 		goto out;
1716 
1717 	ret = velocity_init_rd_ring(vptr);
1718 	if (ret < 0)
1719 		goto err_free_dma_rings_0;
1720 
1721 	ret = velocity_init_td_ring(vptr);
1722 	if (ret < 0)
1723 		goto err_free_rd_ring_1;
1724 out:
1725 	return ret;
1726 
1727 err_free_rd_ring_1:
1728 	velocity_free_rd_ring(vptr);
1729 err_free_dma_rings_0:
1730 	velocity_free_dma_rings(vptr);
1731 	goto out;
1732 }
1733 
1734 /**
1735  *	velocity_free_tx_buf	-	free transmit buffer
1736  *	@vptr: velocity
 *	@tdinfo: buffer
 *	@td: transmit descriptor
 *
 *	Release a transmit buffer. If the buffer was preallocated then
1740  *	recycle it, if not then unmap the buffer.
1741  */
1742 static void velocity_free_tx_buf(struct velocity_info *vptr,
1743 		struct velocity_td_info *tdinfo, struct tx_desc *td)
1744 {
1745 	struct sk_buff *skb = tdinfo->skb;
1746 	int i;
1747 
1748 	/*
1749 	 *	Don't unmap the pre-allocated tx_bufs
1750 	 */
1751 	for (i = 0; i < tdinfo->nskb_dma; i++) {
1752 		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1753 
1754 		/* For scatter-gather */
1755 		if (skb_shinfo(skb)->nr_frags > 0)
1756 			pktlen = max_t(size_t, pktlen,
1757 				       td->td_buf[i].size & ~TD_QUEUE);
1758 
1759 		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1760 				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1761 	}
1762 	dev_consume_skb_irq(skb);
1763 	tdinfo->skb = NULL;
1764 }
1765 
1766 /*
1767  *	FIXME: could we merge this with velocity_free_tx_buf ?
1768  */
1769 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1770 							 int q, int n)
1771 {
1772 	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1773 	int i;
1774 
1775 	if (td_info == NULL)
1776 		return;
1777 
1778 	if (td_info->skb) {
1779 		for (i = 0; i < td_info->nskb_dma; i++) {
1780 			if (td_info->skb_dma[i]) {
1781 				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1782 					td_info->skb->len, DMA_TO_DEVICE);
1783 				td_info->skb_dma[i] = 0;
1784 			}
1785 		}
1786 		dev_kfree_skb(td_info->skb);
1787 		td_info->skb = NULL;
1788 	}
1789 }
1790 
1791 /**
1792  *	velocity_free_td_ring	-	free td ring
1793  *	@vptr: velocity
1794  *
1795  *	Free up the transmit ring for this particular velocity adapter.
1796  *	We free the ring contents but not the ring itself.
1797  */
1798 static void velocity_free_td_ring(struct velocity_info *vptr)
1799 {
1800 	int i, j;
1801 
1802 	for (j = 0; j < vptr->tx.numq; j++) {
1803 		if (vptr->tx.infos[j] == NULL)
1804 			continue;
1805 		for (i = 0; i < vptr->options.numtx; i++)
1806 			velocity_free_td_ring_entry(vptr, j, i);
1807 
1808 		kfree(vptr->tx.infos[j]);
1809 		vptr->tx.infos[j] = NULL;
1810 	}
1811 }
1812 
1813 static void velocity_free_rings(struct velocity_info *vptr)
1814 {
1815 	velocity_free_td_ring(vptr);
1816 	velocity_free_rd_ring(vptr);
1817 	velocity_free_dma_rings(vptr);
1818 }
1819 
1820 /**
1821  *	velocity_error	-	handle error from controller
1822  *	@vptr: velocity
1823  *	@status: card status
1824  *
1825  *	Process an error report from the hardware and attempt to recover
1826  *	the card itself. At the moment we cannot recover from some
1827  *	theoretically impossible errors but this could be fixed using
1828  *	the pci_device_failed logic to bounce the hardware
1829  *
1830  */
1831 static void velocity_error(struct velocity_info *vptr, int status)
1832 {
1833 
1834 	if (status & ISR_TXSTLI) {
1835 		struct mac_regs __iomem *regs = vptr->mac_regs;
1836 
1837 		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1838 			   readw(&regs->TDIdx[0]));
1839 		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1840 		writew(TRDCSR_RUN, &regs->TDCSRClr);
1841 		netif_stop_queue(vptr->netdev);
1842 
1843 		/* FIXME: port over the pci_device_failed code and use it
1844 		   here */
1845 	}
1846 
1847 	if (status & ISR_SRCI) {
1848 		struct mac_regs __iomem *regs = vptr->mac_regs;
1849 		int linked;
1850 
1851 		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1852 			vptr->mii_status = check_connection_type(regs);
1853 
1854 			/*
1855 			 *	If it is a 3119, disable frame bursting in
1856 			 *	halfduplex mode and enable it in fullduplex
1857 			 *	 mode
1858 			 */
1859 			if (vptr->rev_id < REV_ID_VT3216_A0) {
1860 				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1861 					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1862 				else
1863 					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1864 			}
1865 			/*
1866 			 *	Only enable CD heart beat counter in 10HD mode
1867 			 */
1868 			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1869 				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1870 			else
1871 				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1872 
1873 			setup_queue_timers(vptr);
1874 		}
1875 		/*
1876 		 *	Get link status from PHYSR0
1877 		 */
1878 		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1879 
1880 		if (linked) {
1881 			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1882 			netif_carrier_on(vptr->netdev);
1883 		} else {
1884 			vptr->mii_status |= VELOCITY_LINK_FAIL;
1885 			netif_carrier_off(vptr->netdev);
1886 		}
1887 
1888 		velocity_print_link_status(vptr);
1889 		enable_flow_control_ability(vptr);
1890 
1891 		/*
1892 		 *	Re-enable auto-polling because SRCI will disable
1893 		 *	auto-polling
1894 		 */
1895 
1896 		enable_mii_autopoll(regs);
1897 
1898 		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1899 			netif_stop_queue(vptr->netdev);
1900 		else
1901 			netif_wake_queue(vptr->netdev);
1902 
1903 	}
1904 	if (status & ISR_MIBFI)
1905 		velocity_update_hw_mibs(vptr);
1906 	if (status & ISR_LSTEI)
1907 		mac_rx_queue_wake(vptr->mac_regs);
1908 }
1909 
/**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: velocity
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
 */
1918 static int velocity_tx_srv(struct velocity_info *vptr)
1919 {
1920 	struct tx_desc *td;
1921 	int qnum;
1922 	int full = 0;
1923 	int idx;
1924 	int works = 0;
1925 	struct velocity_td_info *tdinfo;
1926 	struct net_device_stats *stats = &vptr->netdev->stats;
1927 
1928 	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1929 		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1930 			idx = (idx + 1) % vptr->options.numtx) {
1931 
1932 			/*
1933 			 *	Get Tx Descriptor
1934 			 */
1935 			td = &(vptr->tx.rings[qnum][idx]);
1936 			tdinfo = &(vptr->tx.infos[qnum][idx]);
1937 
1938 			if (td->tdesc0.len & OWNED_BY_NIC)
1939 				break;
1940 
			if (works++ > 15)
1942 				break;
1943 
1944 			if (td->tdesc0.TSR & TSR0_TERR) {
1945 				stats->tx_errors++;
1946 				stats->tx_dropped++;
1947 				if (td->tdesc0.TSR & TSR0_CDH)
1948 					stats->tx_heartbeat_errors++;
1949 				if (td->tdesc0.TSR & TSR0_CRS)
1950 					stats->tx_carrier_errors++;
1951 				if (td->tdesc0.TSR & TSR0_ABT)
1952 					stats->tx_aborted_errors++;
1953 				if (td->tdesc0.TSR & TSR0_OWC)
1954 					stats->tx_window_errors++;
1955 			} else {
1956 				stats->tx_packets++;
1957 				stats->tx_bytes += tdinfo->skb->len;
1958 			}
1959 			velocity_free_tx_buf(vptr, tdinfo, td);
1960 			vptr->tx.used[qnum]--;
1961 		}
1962 		vptr->tx.tail[qnum] = idx;
1963 
1964 		if (AVAIL_TD(vptr, qnum) < 1)
1965 			full = 1;
1966 	}
1967 	/*
1968 	 *	Look to see if we should kick the transmit network
1969 	 *	layer for more work.
1970 	 */
1971 	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1972 	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1973 		netif_wake_queue(vptr->netdev);
1974 	}
1975 	return works;
1976 }
1977 
1978 /**
1979  *	velocity_rx_csum	-	checksum process
1980  *	@rd: receive packet descriptor
1981  *	@skb: network layer packet buffer
1982  *
1983  *	Process the status bits for the received packet and determine
1984  *	if the checksum was computed and verified by the hardware
1985  */
1986 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1987 {
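	/*
	 * Start from "no checksum": only report CHECKSUM_UNNECESSARY when
	 * the hardware verified the IP header checksum and, for TCP/UDP
	 * frames, the layer 4 checksum as well.
	 */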
1988 	skb_checksum_none_assert(skb);
1989 
1990 	if (rd->rdesc1.CSM & CSM_IPKT) {
1991 		if (rd->rdesc1.CSM & CSM_IPOK) {
1992 			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1993 					(rd->rdesc1.CSM & CSM_UDPKT)) {
1994 				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1995 					return;
1996 			}
1997 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 		}
1999 	}
2000 }
2001 
2002 /**
2003  *	velocity_rx_copy	-	in place Rx copy for small packets
2004  *	@rx_skb: network layer packet buffer candidate
2005  *	@pkt_size: received data size
 *	@vptr: velocity adapter
2008  *
2009  *	Replace the current skb that is scheduled for Rx processing by a
2010  *	shorter, immediately allocated skb, if the received packet is small
2011  *	enough. This function returns a negative value if the received
2012  *	packet is too big or if memory is exhausted.
2013  */
2014 static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2015 			    struct velocity_info *vptr)
2016 {
2017 	int ret = -1;
2018 	if (pkt_size < rx_copybreak) {
2019 		struct sk_buff *new_skb;
2020 
2021 		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2022 		if (new_skb) {
2023 			new_skb->ip_summed = rx_skb[0]->ip_summed;
2024 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2025 			*rx_skb = new_skb;
2026 			ret = 0;
2027 		}
2028 
2029 	}
2030 	return ret;
2031 }
2032 
2033 /**
2034  *	velocity_iph_realign	-	IP header alignment
2035  *	@vptr: velocity we are handling
2036  *	@skb: network layer packet buffer
2037  *	@pkt_size: received data size
2038  *
 *	Align the IP header on a 2 byte boundary. This behavior can be
2040  *	configured by the user.
2041  */
2042 static inline void velocity_iph_realign(struct velocity_info *vptr,
2043 					struct sk_buff *skb, int pkt_size)
2044 {
2045 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
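		/*
		 * Shift the frame up two bytes so that the IP header that
		 * follows the 14 byte Ethernet header ends up 4 byte aligned.
		 */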
2046 		memmove(skb->data + 2, skb->data, pkt_size);
2047 		skb_reserve(skb, 2);
2048 	}
2049 }
2050 
2051 /**
2052  *	velocity_receive_frame	-	received packet processor
2053  *	@vptr: velocity we are handling
2054  *	@idx: ring index
2055  *
2056  *	A packet has arrived. We process the packet and if appropriate
2057  *	pass the frame up the network stack
2058  */
2059 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2060 {
2061 	struct net_device_stats *stats = &vptr->netdev->stats;
2062 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2063 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2064 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2065 	struct sk_buff *skb;
2066 
2067 	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2068 		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2069 			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2070 		stats->rx_length_errors++;
2071 		return -EINVAL;
2072 	}
2073 
2074 	if (rd->rdesc0.RSR & RSR_MAR)
2075 		stats->multicast++;
2076 
2077 	skb = rd_info->skb;
2078 
2079 	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2080 				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2081 
2082 	velocity_rx_csum(rd, skb);
2083 
2084 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2085 		velocity_iph_realign(vptr, skb, pkt_len);
2086 		rd_info->skb = NULL;
2087 		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2088 				 DMA_FROM_DEVICE);
2089 	} else {
2090 		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2091 					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
2092 	}
2093 
	/* The reported length includes the 4 byte FCS; don't pass it up */
	skb_put(skb, pkt_len - 4);
2095 	skb->protocol = eth_type_trans(skb, vptr->netdev);
2096 
2097 	if (rd->rdesc0.RSR & RSR_DETAG) {
2098 		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2099 
2100 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2101 	}
2102 	netif_receive_skb(skb);
2103 
2104 	stats->rx_bytes += pkt_len;
2105 	stats->rx_packets++;
2106 
2107 	return 0;
2108 }
2109 
2110 /**
2111  *	velocity_rx_srv		-	service RX interrupt
 *	@vptr: velocity
 *	@budget_left: remaining NAPI budget
2113  *
2114  *	Walk the receive ring of the velocity adapter and remove
2115  *	any received packets from the receive queue. Hand the ring
2116  *	slots back to the adapter for reuse.
2117  */
2118 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2119 {
2120 	struct net_device_stats *stats = &vptr->netdev->stats;
2121 	int rd_curr = vptr->rx.curr;
2122 	int works = 0;
2123 
2124 	while (works < budget_left) {
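		/*
		 * Walk completed slots only: stop at the first slot without a
		 * buffer or at one the NIC still owns.
		 */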
2125 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2126 
2127 		if (!vptr->rx.info[rd_curr].skb)
2128 			break;
2129 
2130 		if (rd->rdesc0.len & OWNED_BY_NIC)
2131 			break;
2132 
		/* Read the descriptor only after seeing the ownership bit clear */
		rmb();
2134 
2135 		/*
		 *	Don't drop CE or RL error frames even though RXOK is off
2137 		 */
2138 		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2139 			if (velocity_receive_frame(vptr, rd_curr) < 0)
2140 				stats->rx_dropped++;
2141 		} else {
2142 			if (rd->rdesc0.RSR & RSR_CRC)
2143 				stats->rx_crc_errors++;
2144 			if (rd->rdesc0.RSR & RSR_FAE)
2145 				stats->rx_frame_errors++;
2146 
2147 			stats->rx_dropped++;
2148 		}
2149 
		/* Re-arm the descriptor's interrupt enable bit before reuse */
		rd->size |= RX_INTEN;
2151 
2152 		rd_curr++;
2153 		if (rd_curr >= vptr->options.numrx)
2154 			rd_curr = 0;
2155 		works++;
2156 	}
2157 
2158 	vptr->rx.curr = rd_curr;
2159 
2160 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2161 		velocity_give_many_rx_descs(vptr);
2162 
2163 	VAR_USED(stats);
2164 	return works;
2165 }
2166 
2167 static int velocity_poll(struct napi_struct *napi, int budget)
2168 {
2169 	struct velocity_info *vptr = container_of(napi,
2170 			struct velocity_info, napi);
2171 	unsigned int rx_done;
2172 	unsigned long flags;
2173 
2174 	/*
2175 	 * Do rx and tx twice for performance (taken from the VIA
2176 	 * out-of-tree driver).
2177 	 */
2178 	rx_done = velocity_rx_srv(vptr, budget);
2179 	spin_lock_irqsave(&vptr->lock, flags);
2180 	velocity_tx_srv(vptr);
2181 	/* If budget not fully consumed, exit the polling mode */
2182 	if (rx_done < budget) {
2183 		napi_complete_done(napi, rx_done);
2184 		mac_enable_int(vptr->mac_regs);
2185 	}
2186 	spin_unlock_irqrestore(&vptr->lock, flags);
2187 
2188 	return rx_done;
2189 }
2190 
2191 /**
2192  *	velocity_intr		-	interrupt callback
2193  *	@irq: interrupt number
2194  *	@dev_instance: interrupting device
2195  *
2196  *	Called whenever an interrupt is generated by the velocity
2197  *	adapter IRQ line. We may not be the source of the interrupt
2198  *	and need to identify initially if we are, and if not exit as
2199  *	efficiently as possible.
2200  */
2201 static irqreturn_t velocity_intr(int irq, void *dev_instance)
2202 {
2203 	struct net_device *dev = dev_instance;
2204 	struct velocity_info *vptr = netdev_priv(dev);
2205 	u32 isr_status;
2206 
2207 	spin_lock(&vptr->lock);
2208 	isr_status = mac_read_isr(vptr->mac_regs);
2209 
2210 	/* Not us ? */
2211 	if (isr_status == 0) {
2212 		spin_unlock(&vptr->lock);
2213 		return IRQ_NONE;
2214 	}
2215 
2216 	/* Ack the interrupt */
2217 	mac_write_isr(vptr->mac_regs, isr_status);
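
	/*
	 * Hand RX/TX work to NAPI with the chip interrupts masked; error
	 * and link change sources are handled directly below.
	 */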
2218 
2219 	if (likely(napi_schedule_prep(&vptr->napi))) {
2220 		mac_disable_int(vptr->mac_regs);
2221 		__napi_schedule(&vptr->napi);
2222 	}
2223 
2224 	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2225 		velocity_error(vptr, isr_status);
2226 
2227 	spin_unlock(&vptr->lock);
2228 
2229 	return IRQ_HANDLED;
2230 }
2231 
2232 /**
2233  *	velocity_open		-	interface activation callback
2234  *	@dev: network layer device to open
2235  *
2236  *	Called when the network layer brings the interface up. Returns
2237  *	a negative posix error code on failure, or zero on success.
2238  *
2239  *	All the ring allocation and set up is done on open for this
2240  *	adapter to minimise memory usage when inactive
2241  */
2242 static int velocity_open(struct net_device *dev)
2243 {
2244 	struct velocity_info *vptr = netdev_priv(dev);
2245 	int ret;
2246 
2247 	ret = velocity_init_rings(vptr, dev->mtu);
2248 	if (ret < 0)
2249 		goto out;
2250 
2251 	/* Ensure chip is running */
2252 	velocity_set_power_state(vptr, PCI_D0);
2253 
2254 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2255 
2256 	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2257 			  dev->name, dev);
2258 	if (ret < 0) {
2259 		/* Power down the chip */
2260 		velocity_set_power_state(vptr, PCI_D3hot);
2261 		velocity_free_rings(vptr);
2262 		goto out;
2263 	}
2264 
2265 	velocity_give_many_rx_descs(vptr);
2266 
2267 	mac_enable_int(vptr->mac_regs);
2268 	netif_start_queue(dev);
2269 	napi_enable(&vptr->napi);
2270 	vptr->flags |= VELOCITY_FLAGS_OPENED;
2271 out:
2272 	return ret;
2273 }
2274 
2275 /**
2276  *	velocity_shutdown	-	shut down the chip
2277  *	@vptr: velocity to deactivate
2278  *
2279  *	Shuts down the internal operations of the velocity and
2280  *	disables interrupts, autopolling, transmit and receive
2281  */
2282 static void velocity_shutdown(struct velocity_info *vptr)
2283 {
2284 	struct mac_regs __iomem *regs = vptr->mac_regs;
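
	/* Quiesce the hardware: mask interrupts, stop the MAC, clear the
	 * TX/RX queue run bits, stop MII autopolling and ack any pending
	 * interrupt status.
	 */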
2285 	mac_disable_int(regs);
2286 	writel(CR0_STOP, &regs->CR0Set);
2287 	writew(0xFFFF, &regs->TDCSRClr);
2288 	writeb(0xFF, &regs->RDCSRClr);
2289 	safe_disable_mii_autopoll(regs);
2290 	mac_clear_isr(regs);
2291 }
2292 
2293 /**
2294  *	velocity_change_mtu	-	MTU change callback
2295  *	@dev: network device
2296  *	@new_mtu: desired MTU
2297  *
 *	Handle requests from the networking layer for MTU change on
 *	this interface. Return zero for success or a negative posix
 *	error code.
2301  */
2302 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2303 {
2304 	struct velocity_info *vptr = netdev_priv(dev);
2305 	int ret = 0;
2306 
2307 	if (!netif_running(dev)) {
2308 		dev->mtu = new_mtu;
2309 		goto out_0;
2310 	}
2311 
2312 	if (dev->mtu != new_mtu) {
2313 		struct velocity_info *tmp_vptr;
2314 		unsigned long flags;
2315 		struct rx_info rx;
2316 		struct tx_info tx;
2317 
2318 		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2319 		if (!tmp_vptr) {
2320 			ret = -ENOMEM;
2321 			goto out_0;
2322 		}
2323 
2324 		tmp_vptr->netdev = dev;
2325 		tmp_vptr->pdev = vptr->pdev;
2326 		tmp_vptr->dev = vptr->dev;
2327 		tmp_vptr->options = vptr->options;
2328 		tmp_vptr->tx.numq = vptr->tx.numq;
2329 
2330 		ret = velocity_init_rings(tmp_vptr, new_mtu);
2331 		if (ret < 0)
2332 			goto out_free_tmp_vptr_1;
2333 
2334 		napi_disable(&vptr->napi);
2335 
2336 		spin_lock_irqsave(&vptr->lock, flags);
2337 
2338 		netif_stop_queue(dev);
2339 		velocity_shutdown(vptr);
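
		/*
		 * Swap in the rings sized for the new MTU; the old rings are
		 * parked in tmp_vptr and freed once the device is running
		 * again.
		 */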
2340 
2341 		rx = vptr->rx;
2342 		tx = vptr->tx;
2343 
2344 		vptr->rx = tmp_vptr->rx;
2345 		vptr->tx = tmp_vptr->tx;
2346 
2347 		tmp_vptr->rx = rx;
2348 		tmp_vptr->tx = tx;
2349 
2350 		dev->mtu = new_mtu;
2351 
2352 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2353 
2354 		velocity_give_many_rx_descs(vptr);
2355 
2356 		napi_enable(&vptr->napi);
2357 
2358 		mac_enable_int(vptr->mac_regs);
2359 		netif_start_queue(dev);
2360 
2361 		spin_unlock_irqrestore(&vptr->lock, flags);
2362 
2363 		velocity_free_rings(tmp_vptr);
2364 
2365 out_free_tmp_vptr_1:
2366 		kfree(tmp_vptr);
2367 	}
2368 out_0:
2369 	return ret;
2370 }
2371 
2372 #ifdef CONFIG_NET_POLL_CONTROLLER
2373 /**
2374  *  velocity_poll_controller		-	Velocity Poll controller function
2375  *  @dev: network device
 *  @dev: network device
 *
 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2380  */
2381 static void velocity_poll_controller(struct net_device *dev)
2382 {
2383 	disable_irq(dev->irq);
2384 	velocity_intr(dev->irq, dev);
2385 	enable_irq(dev->irq);
2386 }
2387 #endif
2388 
2389 /**
2390  *	velocity_mii_ioctl		-	MII ioctl handler
2391  *	@dev: network device
2392  *	@ifr: the ifreq block for the ioctl
2393  *	@cmd: the command
2394  *
2395  *	Process MII requests made via ioctl from the network layer. These
2396  *	are used by tools like kudzu to interrogate the link state of the
2397  *	hardware
2398  */
2399 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2400 {
2401 	struct velocity_info *vptr = netdev_priv(dev);
2402 	struct mac_regs __iomem *regs = vptr->mac_regs;
2403 	unsigned long flags;
2404 	struct mii_ioctl_data *miidata = if_mii(ifr);
2405 	int err;
2406 
2407 	switch (cmd) {
2408 	case SIOCGMIIPHY:
2409 		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2410 		break;
2411 	case SIOCGMIIREG:
2412 		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2413 			return -ETIMEDOUT;
2414 		break;
2415 	case SIOCSMIIREG:
2416 		spin_lock_irqsave(&vptr->lock, flags);
2417 		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2418 		spin_unlock_irqrestore(&vptr->lock, flags);
2419 		check_connection_type(vptr->mac_regs);
2420 		if (err)
2421 			return err;
2422 		break;
2423 	default:
2424 		return -EOPNOTSUPP;
2425 	}
2426 	return 0;
2427 }
2428 
2429 /**
2430  *	velocity_ioctl		-	ioctl entry point
2431  *	@dev: network device
2432  *	@rq: interface request ioctl
2433  *	@cmd: command code
2434  *
2435  *	Called when the user issues an ioctl request to the network
2436  *	device in question. The velocity interface supports MII.
2437  */
2438 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2439 {
2440 	struct velocity_info *vptr = netdev_priv(dev);
2441 	int ret;
2442 
2443 	/* If we are asked for information and the device is power
2444 	   saving then we need to bring the device back up to talk to it */
2445 
2446 	if (!netif_running(dev))
2447 		velocity_set_power_state(vptr, PCI_D0);
2448 
2449 	switch (cmd) {
2450 	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2451 	case SIOCGMIIREG:	/* Read MII PHY register. */
2452 	case SIOCSMIIREG:	/* Write to MII PHY register. */
2453 		ret = velocity_mii_ioctl(dev, rq, cmd);
2454 		break;
2455 
2456 	default:
2457 		ret = -EOPNOTSUPP;
2458 	}
2459 	if (!netif_running(dev))
2460 		velocity_set_power_state(vptr, PCI_D3hot);
2461 
2462 
2463 	return ret;
2464 }
2465 
2466 /**
 *	velocity_get_stats	-	statistics callback
2468  *	@dev: network device
2469  *
2470  *	Callback from the network layer to allow driver statistics
2471  *	to be resynchronized with hardware collected state. In the
2472  *	case of the velocity we need to pull the MIB counters from
2473  *	the hardware into the counters before letting the network
2474  *	layer display them.
2475  */
2476 static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2477 {
2478 	struct velocity_info *vptr = netdev_priv(dev);
2479 
2480 	/* If the hardware is down, don't touch MII */
2481 	if (!netif_running(dev))
2482 		return &dev->stats;
2483 
2484 	spin_lock_irq(&vptr->lock);
2485 	velocity_update_hw_mibs(vptr);
2486 	spin_unlock_irq(&vptr->lock);
2487 
2488 	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2489 	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2490 	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2491 
2492 //  unsigned long   rx_dropped;     /* no space in linux buffers    */
2493 	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2494 	/* detailed rx_errors: */
2495 //  unsigned long   rx_length_errors;
2496 //  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2497 	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2498 //  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2499 //  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2500 //  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2501 
2502 	/* detailed tx_errors */
2503 //  unsigned long   tx_fifo_errors;
2504 
2505 	return &dev->stats;
2506 }
2507 
2508 /**
2509  *	velocity_close		-	close adapter callback
2510  *	@dev: network device
2511  *
2512  *	Callback from the network layer when the velocity is being
2513  *	deactivated by the network layer
2514  */
2515 static int velocity_close(struct net_device *dev)
2516 {
2517 	struct velocity_info *vptr = netdev_priv(dev);
2518 
2519 	napi_disable(&vptr->napi);
2520 	netif_stop_queue(dev);
2521 	velocity_shutdown(vptr);
2522 
2523 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2524 		velocity_get_ip(vptr);
2525 
2526 	free_irq(dev->irq, dev);
2527 
2528 	velocity_free_rings(vptr);
2529 
2530 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2531 	return 0;
2532 }
2533 
2534 /**
2535  *	velocity_xmit		-	transmit packet callback
2536  *	@skb: buffer to transmit
2537  *	@dev: network device
2538  *
 *	Called by the network layer to request that a packet be queued to
 *	the velocity. Returns zero on success.
2541  */
2542 static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2543 				 struct net_device *dev)
2544 {
2545 	struct velocity_info *vptr = netdev_priv(dev);
2546 	int qnum = 0;
2547 	struct tx_desc *td_ptr;
2548 	struct velocity_td_info *tdinfo;
2549 	unsigned long flags;
2550 	int pktlen;
2551 	int index, prev;
2552 	int i = 0;
2553 
2554 	if (skb_padto(skb, ETH_ZLEN))
2555 		goto out;
2556 
	/* The hardware can handle at most 7 memory segments, so linearize
	 * the skb if there are more */
2559 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2560 		dev_kfree_skb_any(skb);
2561 		return NETDEV_TX_OK;
2562 	}
2563 
2564 	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2565 			max_t(unsigned int, skb->len, ETH_ZLEN) :
2566 				skb_headlen(skb);
2567 
2568 	spin_lock_irqsave(&vptr->lock, flags);
2569 
2570 	index = vptr->tx.curr[qnum];
2571 	td_ptr = &(vptr->tx.rings[qnum][index]);
2572 	tdinfo = &(vptr->tx.infos[qnum][index]);
2573 
2574 	td_ptr->tdesc1.TCR = TCR0_TIC;
2575 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
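
	/*
	 * This descriptor becomes the new ring tail: its queue bit stays
	 * clear, and the previous descriptor's queue bit is set at the end
	 * of this function so the hardware chains on to this frame.
	 */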
2576 
2577 	/*
2578 	 *	Map the linear network buffer into PCI space and
2579 	 *	add it to the transmit ring.
2580 	 */
2581 	tdinfo->skb = skb;
2582 	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2583 								DMA_TO_DEVICE);
2584 	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2585 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2586 	td_ptr->td_buf[0].pa_high = 0;
2587 	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2588 
2589 	/* Handle fragments */
2590 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2591 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2592 
2593 		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2594 							  frag, 0,
2595 							  skb_frag_size(frag),
2596 							  DMA_TO_DEVICE);
2597 
2598 		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2599 		td_ptr->td_buf[i + 1].pa_high = 0;
2600 		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2601 	}
2602 	tdinfo->nskb_dma = i + 1;
2603 
	/* The segment count (nskb_dma + 1) lands in bits 4-7 of the command byte */
	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2605 
2606 	if (skb_vlan_tag_present(skb)) {
2607 		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2608 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2609 	}
2610 
2611 	/*
2612 	 *	Handle hardware checksum
2613 	 */
2614 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2615 		const struct iphdr *ip = ip_hdr(skb);
2616 		if (ip->protocol == IPPROTO_TCP)
2617 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2618 		else if (ip->protocol == IPPROTO_UDP)
2619 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2620 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2621 	}
2622 
2623 	prev = index - 1;
2624 	if (prev < 0)
2625 		prev = vptr->options.numtx - 1;
2626 	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2627 	vptr->tx.used[qnum]++;
2628 	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2629 
2630 	if (AVAIL_TD(vptr, qnum) < 1)
2631 		netif_stop_queue(dev);
2632 
2633 	td_ptr = &(vptr->tx.rings[qnum][prev]);
2634 	td_ptr->td_buf[0].size |= TD_QUEUE;
2635 	mac_tx_queue_wake(vptr->mac_regs, qnum);
2636 
2637 	spin_unlock_irqrestore(&vptr->lock, flags);
2638 out:
2639 	return NETDEV_TX_OK;
2640 }
2641 
2642 static const struct net_device_ops velocity_netdev_ops = {
2643 	.ndo_open		= velocity_open,
2644 	.ndo_stop		= velocity_close,
2645 	.ndo_start_xmit		= velocity_xmit,
2646 	.ndo_get_stats		= velocity_get_stats,
2647 	.ndo_validate_addr	= eth_validate_addr,
2648 	.ndo_set_mac_address	= eth_mac_addr,
2649 	.ndo_set_rx_mode	= velocity_set_multi,
2650 	.ndo_change_mtu		= velocity_change_mtu,
2651 	.ndo_do_ioctl		= velocity_ioctl,
2652 	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2653 	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2654 #ifdef CONFIG_NET_POLL_CONTROLLER
2655 	.ndo_poll_controller = velocity_poll_controller,
2656 #endif
2657 };
2658 
2659 /**
2660  *	velocity_init_info	-	init private data
 *	@vptr: Velocity info
 *	@info: Board type
2664  *
2665  *	Set up the initial velocity_info struct for the device that has been
2666  *	discovered.
2667  */
2668 static void velocity_init_info(struct velocity_info *vptr,
2669 				const struct velocity_info_tbl *info)
2670 {
2671 	vptr->chip_id = info->chip_id;
2672 	vptr->tx.numq = info->txqueue;
2673 	vptr->multicast_limit = MCAM_SIZE;
2674 	spin_lock_init(&vptr->lock);
2675 }
2676 
2677 /**
2678  *	velocity_get_pci_info	-	retrieve PCI info for device
 *	@vptr: velocity device
2681  *
2682  *	Retrieve the PCI configuration space data that interests us from
2683  *	the kernel PCI layer
2684  */
2685 static int velocity_get_pci_info(struct velocity_info *vptr)
2686 {
2687 	struct pci_dev *pdev = vptr->pdev;
2688 
2689 	pci_set_master(pdev);
2690 
2691 	vptr->ioaddr = pci_resource_start(pdev, 0);
2692 	vptr->memaddr = pci_resource_start(pdev, 1);
2693 
2694 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2695 		dev_err(&pdev->dev,
2696 			   "region #0 is not an I/O resource, aborting.\n");
2697 		return -EINVAL;
2698 	}
2699 
2700 	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2701 		dev_err(&pdev->dev,
2702 			   "region #1 is an I/O resource, aborting.\n");
2703 		return -EINVAL;
2704 	}
2705 
2706 	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2707 		dev_err(&pdev->dev, "region #1 is too small.\n");
2708 		return -EINVAL;
2709 	}
2710 
2711 	return 0;
2712 }
2713 
2714 /**
2715  *	velocity_get_platform_info - retrieve platform info for device
 *	@vptr: velocity device
2718  *
2719  *	Retrieve the Platform configuration data that interests us
2720  */
2721 static int velocity_get_platform_info(struct velocity_info *vptr)
2722 {
2723 	struct resource res;
2724 	int ret;
2725 
2726 	if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2727 		vptr->no_eeprom = 1;
2728 
2729 	ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2730 	if (ret) {
2731 		dev_err(vptr->dev, "unable to find memory address\n");
2732 		return ret;
2733 	}
2734 
2735 	vptr->memaddr = res.start;
2736 
2737 	if (resource_size(&res) < VELOCITY_IO_SIZE) {
2738 		dev_err(vptr->dev, "memory region is too small.\n");
2739 		return -EINVAL;
2740 	}
2741 
2742 	return 0;
2743 }
2744 
2745 /**
2746  *	velocity_print_info	-	per driver data
 *	velocity_print_info	-	per device data
 *	@vptr: velocity
 *
 *	Print per device data as the kernel driver finds Velocity
2751  */
2752 static void velocity_print_info(struct velocity_info *vptr)
2753 {
2754 	netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2755 		    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2756 }
2757 
2758 static u32 velocity_get_link(struct net_device *dev)
2759 {
2760 	struct velocity_info *vptr = netdev_priv(dev);
2761 	struct mac_regs __iomem *regs = vptr->mac_regs;
2762 	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2763 }
2764 
2765 /**
2766  *	velocity_probe - set up discovered velocity device
 *	@dev: device on the bus (PCI or platform)
 *	@irq: interrupt line assigned to the adapter
 *	@info: chip info table entry for this adapter
 *	@bustype: bus that device is connected to
2770  *
2771  *	Configure a discovered adapter from scratch. Return a negative
2772  *	errno error code on failure paths.
2773  */
2774 static int velocity_probe(struct device *dev, int irq,
2775 			   const struct velocity_info_tbl *info,
2776 			   enum velocity_bus_type bustype)
2777 {
2778 	struct net_device *netdev;
2779 	int i;
2780 	struct velocity_info *vptr;
2781 	struct mac_regs __iomem *regs;
2782 	int ret = -ENOMEM;
2783 
2784 	/* FIXME: this driver, like almost all other ethernet drivers,
2785 	 * can support more than MAX_UNITS.
2786 	 */
2787 	if (velocity_nics >= MAX_UNITS) {
2788 		dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2789 		return -ENODEV;
2790 	}
2791 
2792 	netdev = alloc_etherdev(sizeof(struct velocity_info));
2793 	if (!netdev)
2794 		goto out;
2795 
2796 	/* Chain it all together */
2797 
2798 	SET_NETDEV_DEV(netdev, dev);
2799 	vptr = netdev_priv(netdev);
2800 
2801 	pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2802 	pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2803 	pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2804 
2805 	netdev->irq = irq;
2806 	vptr->netdev = netdev;
2807 	vptr->dev = dev;
2808 
2809 	velocity_init_info(vptr, info);
2810 
2811 	if (bustype == BUS_PCI) {
2812 		vptr->pdev = to_pci_dev(dev);
2813 
2814 		ret = velocity_get_pci_info(vptr);
2815 		if (ret < 0)
2816 			goto err_free_dev;
2817 	} else {
2818 		vptr->pdev = NULL;
2819 		ret = velocity_get_platform_info(vptr);
2820 		if (ret < 0)
2821 			goto err_free_dev;
2822 	}
2823 
2824 	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2825 	if (regs == NULL) {
2826 		ret = -EIO;
2827 		goto err_free_dev;
2828 	}
2829 
2830 	vptr->mac_regs = regs;
2831 	vptr->rev_id = readb(&regs->rev_id);
2832 
2833 	mac_wol_reset(regs);
2834 
2835 	for (i = 0; i < 6; i++)
2836 		netdev->dev_addr[i] = readb(&regs->PAR[i]);
2837 
2838 
2839 	velocity_get_options(&vptr->options, velocity_nics);
2840 
2841 	/*
	 *	Mask out the options that cannot be set on this chip
2843 	 */
2844 
2845 	vptr->options.flags &= info->flags;
2846 
2847 	/*
	 *	Enable the chip specific capabilities
2849 	 */
2850 
2851 	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2852 
2853 	vptr->wol_opts = vptr->options.wol_opts;
2854 	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2855 
2856 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2857 
2858 	netdev->netdev_ops = &velocity_netdev_ops;
2859 	netdev->ethtool_ops = &velocity_ethtool_ops;
2860 	netif_napi_add(netdev, &vptr->napi, velocity_poll,
2861 							VELOCITY_NAPI_WEIGHT);
2862 
2863 	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2864 			   NETIF_F_HW_VLAN_CTAG_TX;
2865 	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2866 			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2867 			NETIF_F_IP_CSUM;
2868 
2869 	/* MTU range: 64 - 9000 */
2870 	netdev->min_mtu = VELOCITY_MIN_MTU;
2871 	netdev->max_mtu = VELOCITY_MAX_MTU;
2872 
2873 	ret = register_netdev(netdev);
2874 	if (ret < 0)
2875 		goto err_iounmap;
2876 
2877 	if (!velocity_get_link(netdev)) {
2878 		netif_carrier_off(netdev);
2879 		vptr->mii_status |= VELOCITY_LINK_FAIL;
2880 	}
2881 
2882 	velocity_print_info(vptr);
2883 	dev_set_drvdata(vptr->dev, netdev);
2884 
2885 	/* and leave the chip powered down */
2886 
2887 	velocity_set_power_state(vptr, PCI_D3hot);
2888 	velocity_nics++;
2889 out:
2890 	return ret;
2891 
2892 err_iounmap:
2893 	netif_napi_del(&vptr->napi);
2894 	iounmap(regs);
2895 err_free_dev:
2896 	free_netdev(netdev);
2897 	goto out;
2898 }
2899 
2900 /**
2901  *	velocity_remove	- device unplug
2902  *	@dev: device being removed
2903  *
2904  *	Device unload callback. Called on an unplug or on module
2905  *	unload for each active device that is present. Disconnects
2906  *	the device from the network layer and frees all the resources
2907  */
2908 static int velocity_remove(struct device *dev)
2909 {
2910 	struct net_device *netdev = dev_get_drvdata(dev);
2911 	struct velocity_info *vptr = netdev_priv(netdev);
2912 
2913 	unregister_netdev(netdev);
2914 	netif_napi_del(&vptr->napi);
2915 	iounmap(vptr->mac_regs);
2916 	free_netdev(netdev);
2917 	velocity_nics--;
2918 
2919 	return 0;
2920 }
2921 
2922 static int velocity_pci_probe(struct pci_dev *pdev,
2923 			       const struct pci_device_id *ent)
2924 {
2925 	const struct velocity_info_tbl *info =
2926 					&chip_info_table[ent->driver_data];
2927 	int ret;
2928 
2929 	ret = pci_enable_device(pdev);
2930 	if (ret < 0)
2931 		return ret;
2932 
2933 	ret = pci_request_regions(pdev, VELOCITY_NAME);
2934 	if (ret < 0) {
2935 		dev_err(&pdev->dev, "No PCI resources.\n");
2936 		goto fail1;
2937 	}
2938 
2939 	ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2940 	if (ret == 0)
2941 		return 0;
2942 
2943 	pci_release_regions(pdev);
2944 fail1:
2945 	pci_disable_device(pdev);
2946 	return ret;
2947 }
2948 
2949 static void velocity_pci_remove(struct pci_dev *pdev)
2950 {
2951 	velocity_remove(&pdev->dev);
2952 
2953 	pci_release_regions(pdev);
2954 	pci_disable_device(pdev);
2955 }
2956 
2957 static int velocity_platform_probe(struct platform_device *pdev)
2958 {
2959 	const struct of_device_id *of_id;
2960 	const struct velocity_info_tbl *info;
2961 	int irq;
2962 
2963 	of_id = of_match_device(velocity_of_ids, &pdev->dev);
2964 	if (!of_id)
2965 		return -EINVAL;
2966 	info = of_id->data;
2967 
2968 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2969 	if (!irq)
2970 		return -EINVAL;
2971 
2972 	return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2973 }
2974 
2975 static int velocity_platform_remove(struct platform_device *pdev)
2976 {
2977 	velocity_remove(&pdev->dev);
2978 
2979 	return 0;
2980 }
2981 
2982 #ifdef CONFIG_PM_SLEEP
2983 /**
2984  *	wol_calc_crc		-	WOL CRC
 *	@size: number of mask bytes (each mask bit covers one pattern byte)
 *	@pattern: data pattern
2986  *	@mask_pattern: mask
2987  *
2988  *	Compute the wake on lan crc hashes for the packet header
2989  *	we are interested in.
2990  */
2991 static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2992 {
2993 	u16 crc = 0xFFFF;
2994 	u8 mask;
2995 	int i, j;
2996 
2997 	for (i = 0; i < size; i++) {
2998 		mask = mask_pattern[i];
2999 
		/* Skip this iteration if the mask byte is zero */
3001 		if (mask == 0x00)
3002 			continue;
3003 
3004 		for (j = 0; j < 8; j++) {
3005 			if ((mask & 0x01) == 0) {
3006 				mask >>= 1;
3007 				continue;
3008 			}
3009 			mask >>= 1;
3010 			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3011 		}
3012 	}
	/* Finally, invert the result and bit-reverse the 16 bit CRC;
	 * crc_ccitt() works LSB first, so the bit order is swapped here.
	 */
	crc = ~crc;
	return bitrev32(crc) >> 16;
3016 }
3017 
3018 /**
3019  *	velocity_set_wol	-	set up for wake on lan
3020  *	@vptr: velocity to set WOL status on
3021  *
3022  *	Set a card up for wake on lan either by unicast or by
3023  *	ARP packet.
3024  *
3025  *	FIXME: check static buffer is safe here
3026  */
3027 static int velocity_set_wol(struct velocity_info *vptr)
3028 {
3029 	struct mac_regs __iomem *regs = vptr->mac_regs;
3030 	enum speed_opt spd_dpx = vptr->options.spd_dpx;
3031 	static u8 buf[256];
3032 	int i;
3033 
3034 	static u32 mask_pattern[2][4] = {
3035 		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3036 		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
3037 	};
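
	/*
	 * Each bit in the masks above selects one byte of the wake pattern
	 * that the hardware includes in its CRC match: the first mask
	 * describes an ARP request, the second a magic packet.
	 */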
3038 
3039 	writew(0xFFFF, &regs->WOLCRClr);
3040 	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3041 	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3042 
3043 	/*
3044 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3045 	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3046 	 */
3047 
3048 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3049 		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3050 
3051 	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3052 		struct arp_packet *arp = (struct arp_packet *) buf;
3053 		u16 crc;
3054 		memset(buf, 0, sizeof(struct arp_packet) + 7);
3055 
3056 		for (i = 0; i < 4; i++)
3057 			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3058 
3059 		arp->type = htons(ETH_P_ARP);
3060 		arp->ar_op = htons(1);
3061 
3062 		memcpy(arp->ar_tip, vptr->ip_addr, 4);
3063 
3064 		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3065 				(u8 *) & mask_pattern[0][0]);
3066 
3067 		writew(crc, &regs->PatternCRC[0]);
3068 		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3069 	}
3070 
3071 	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3072 	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3073 
3074 	writew(0x0FFF, &regs->WOLSRClr);
3075 
3076 	if (spd_dpx == SPD_DPX_1000_FULL)
3077 		goto mac_done;
3078 
3079 	if (spd_dpx != SPD_DPX_AUTO)
3080 		goto advertise_done;
3081 
3082 	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3083 		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3084 			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3085 
3086 		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3087 	}
3088 
3089 	if (vptr->mii_status & VELOCITY_SPEED_1000)
3090 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3091 
3092 advertise_done:
3093 	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
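	/*
	 * Force the MAC out of GMII mode and into full duplex for the low
	 * power state.
	 */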
3094 
3095 	{
3096 		u8 GCR;
3097 		GCR = readb(&regs->CHIPGCR);
3098 		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3099 		writeb(GCR, &regs->CHIPGCR);
3100 	}
3101 
3102 mac_done:
3103 	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3104 	/* Turn on SWPTAG just before entering power mode */
3105 	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3106 	/* Go to bed ..... */
3107 	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3108 
3109 	return 0;
3110 }
3111 
3112 /**
3113  *	velocity_save_context	-	save registers
3114  *	@vptr: velocity
3115  *	@context: buffer for stored context
3116  *
3117  *	Retrieve the current configuration from the velocity hardware
3118  *	and stash it in the context structure, for use by the context
3119  *	restore functions. This allows us to save things we need across
3120  *	power down states
3121  */
3122 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3123 {
3124 	struct mac_regs __iomem *regs = vptr->mac_regs;
3125 	u16 i;
3126 	u8 __iomem *ptr = (u8 __iomem *)regs;
3127 
3128 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3129 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3130 
3131 	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3132 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3133 
3134 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3135 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3136 
3137 }
3138 
3139 static int velocity_suspend(struct device *dev)
3140 {
3141 	struct net_device *netdev = dev_get_drvdata(dev);
3142 	struct velocity_info *vptr = netdev_priv(netdev);
3143 	unsigned long flags;
3144 
3145 	if (!netif_running(vptr->netdev))
3146 		return 0;
3147 
3148 	netif_device_detach(vptr->netdev);
3149 
3150 	spin_lock_irqsave(&vptr->lock, flags);
3151 	if (vptr->pdev)
3152 		pci_save_state(vptr->pdev);
3153 
3154 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3155 		velocity_get_ip(vptr);
3156 		velocity_save_context(vptr, &vptr->context);
3157 		velocity_shutdown(vptr);
3158 		velocity_set_wol(vptr);
3159 		if (vptr->pdev)
3160 			pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3161 		velocity_set_power_state(vptr, PCI_D3hot);
3162 	} else {
3163 		velocity_save_context(vptr, &vptr->context);
3164 		velocity_shutdown(vptr);
3165 		if (vptr->pdev)
3166 			pci_disable_device(vptr->pdev);
3167 		velocity_set_power_state(vptr, PCI_D3hot);
3168 	}
3169 
3170 	spin_unlock_irqrestore(&vptr->lock, flags);
3171 	return 0;
3172 }
3173 
3174 /**
3175  *	velocity_restore_context	-	restore registers
3176  *	@vptr: velocity
3177  *	@context: buffer for stored context
3178  *
3179  *	Reload the register configuration from the velocity context
3180  *	created by velocity_save_context.
3181  */
3182 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3183 {
3184 	struct mac_regs __iomem *regs = vptr->mac_regs;
3185 	int i;
3186 	u8 __iomem *ptr = (u8 __iomem *)regs;
3187 
3188 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3189 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3190 
	/* Just skip cr0; for the other control registers write the inverse
	 * to the companion clear register (4 bytes up), then the saved
	 * value to the set register.
	 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear the bits that should be off... */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* ...then set the bits that should be on */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3197 	}
3198 
3199 	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3200 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3201 
3202 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3203 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3204 
3205 	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3206 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3207 }
3208 
3209 static int velocity_resume(struct device *dev)
3210 {
3211 	struct net_device *netdev = dev_get_drvdata(dev);
3212 	struct velocity_info *vptr = netdev_priv(netdev);
3213 	unsigned long flags;
3214 	int i;
3215 
3216 	if (!netif_running(vptr->netdev))
3217 		return 0;
3218 
3219 	velocity_set_power_state(vptr, PCI_D0);
3220 
3221 	if (vptr->pdev) {
3222 		pci_enable_wake(vptr->pdev, PCI_D0, 0);
3223 		pci_restore_state(vptr->pdev);
3224 	}
3225 
3226 	mac_wol_reset(vptr->mac_regs);
3227 
3228 	spin_lock_irqsave(&vptr->lock, flags);
3229 	velocity_restore_context(vptr, &vptr->context);
3230 	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3231 	mac_disable_int(vptr->mac_regs);
3232 
3233 	velocity_tx_srv(vptr);
3234 
3235 	for (i = 0; i < vptr->tx.numq; i++) {
3236 		if (vptr->tx.used[i])
3237 			mac_tx_queue_wake(vptr->mac_regs, i);
3238 	}
3239 
3240 	mac_enable_int(vptr->mac_regs);
3241 	spin_unlock_irqrestore(&vptr->lock, flags);
3242 	netif_device_attach(vptr->netdev);
3243 
3244 	return 0;
3245 }
3246 #endif	/* CONFIG_PM_SLEEP */
3247 
3248 static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3249 
3250 /*
3251  *	Definition for our device driver. The PCI layer interface
 *	uses this to handle all our card discovery and plugging
3253  */
3254 static struct pci_driver velocity_pci_driver = {
3255 	.name		= VELOCITY_NAME,
3256 	.id_table	= velocity_pci_id_table,
3257 	.probe		= velocity_pci_probe,
3258 	.remove		= velocity_pci_remove,
3259 	.driver = {
3260 		.pm = &velocity_pm_ops,
3261 	},
3262 };
3263 
3264 static struct platform_driver velocity_platform_driver = {
3265 	.probe		= velocity_platform_probe,
3266 	.remove		= velocity_platform_remove,
3267 	.driver = {
3268 		.name = "via-velocity",
3269 		.of_match_table = velocity_of_ids,
3270 		.pm = &velocity_pm_ops,
3271 	},
3272 };
3273 
3274 /**
3275  *	velocity_ethtool_up	-	pre hook for ethtool
3276  *	@dev: network device
3277  *
3278  *	Called before an ethtool operation. We need to make sure the
3279  *	chip is out of D3 state before we poke at it. In case of ethtool
3280  *	ops nesting, only wake the device up in the outermost block.
3281  */
3282 static int velocity_ethtool_up(struct net_device *dev)
3283 {
3284 	struct velocity_info *vptr = netdev_priv(dev);
3285 
3286 	if (vptr->ethtool_ops_nesting == U32_MAX)
3287 		return -EBUSY;
3288 	if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3289 		velocity_set_power_state(vptr, PCI_D0);
3290 	return 0;
3291 }
3292 
3293 /**
3294  *	velocity_ethtool_down	-	post hook for ethtool
3295  *	@dev: network device
3296  *
3297  *	Called after an ethtool operation. Restore the chip back to D3
3298  *	state if it isn't running. In case of ethtool ops nesting, only
3299  *	put the device to sleep in the outermost block.
3300  */
3301 static void velocity_ethtool_down(struct net_device *dev)
3302 {
3303 	struct velocity_info *vptr = netdev_priv(dev);
3304 
3305 	if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3306 		velocity_set_power_state(vptr, PCI_D3hot);
3307 }
3308 
3309 static int velocity_get_link_ksettings(struct net_device *dev,
3310 				       struct ethtool_link_ksettings *cmd)
3311 {
3312 	struct velocity_info *vptr = netdev_priv(dev);
3313 	struct mac_regs __iomem *regs = vptr->mac_regs;
3314 	u32 status;
3315 	u32 supported, advertising;
3316 
3317 	status = check_connection_type(vptr->mac_regs);
3318 
3319 	supported = SUPPORTED_TP |
3320 			SUPPORTED_Autoneg |
3321 			SUPPORTED_10baseT_Half |
3322 			SUPPORTED_10baseT_Full |
3323 			SUPPORTED_100baseT_Half |
3324 			SUPPORTED_100baseT_Full |
3325 			SUPPORTED_1000baseT_Half |
3326 			SUPPORTED_1000baseT_Full;
3327 
3328 	advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3329 	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3330 		advertising |=
3331 			ADVERTISED_10baseT_Half |
3332 			ADVERTISED_10baseT_Full |
3333 			ADVERTISED_100baseT_Half |
3334 			ADVERTISED_100baseT_Full |
3335 			ADVERTISED_1000baseT_Half |
3336 			ADVERTISED_1000baseT_Full;
3337 	} else {
3338 		switch (vptr->options.spd_dpx) {
3339 		case SPD_DPX_1000_FULL:
3340 			advertising |= ADVERTISED_1000baseT_Full;
3341 			break;
3342 		case SPD_DPX_100_HALF:
3343 			advertising |= ADVERTISED_100baseT_Half;
3344 			break;
3345 		case SPD_DPX_100_FULL:
3346 			advertising |= ADVERTISED_100baseT_Full;
3347 			break;
3348 		case SPD_DPX_10_HALF:
3349 			advertising |= ADVERTISED_10baseT_Half;
3350 			break;
3351 		case SPD_DPX_10_FULL:
3352 			advertising |= ADVERTISED_10baseT_Full;
3353 			break;
3354 		default:
3355 			break;
3356 		}
3357 	}
3358 
3359 	if (status & VELOCITY_SPEED_1000)
3360 		cmd->base.speed = SPEED_1000;
3361 	else if (status & VELOCITY_SPEED_100)
3362 		cmd->base.speed = SPEED_100;
3363 	else
3364 		cmd->base.speed = SPEED_10;
3365 
3366 	cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3367 		AUTONEG_ENABLE : AUTONEG_DISABLE;
3368 	cmd->base.port = PORT_TP;
3369 	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3370 
3371 	if (status & VELOCITY_DUPLEX_FULL)
3372 		cmd->base.duplex = DUPLEX_FULL;
3373 	else
3374 		cmd->base.duplex = DUPLEX_HALF;
3375 
3376 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3377 						supported);
3378 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3379 						advertising);
3380 
3381 	return 0;
3382 }
3383 
3384 static int velocity_set_link_ksettings(struct net_device *dev,
3385 				       const struct ethtool_link_ksettings *cmd)
3386 {
3387 	struct velocity_info *vptr = netdev_priv(dev);
3388 	u32 speed = cmd->base.speed;
3389 	u32 curr_status;
3390 	u32 new_status = 0;
3391 	int ret = 0;
3392 
3393 	curr_status = check_connection_type(vptr->mac_regs);
3394 	curr_status &= (~VELOCITY_LINK_FAIL);
3395 
3396 	new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3397 	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3398 	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3399 	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3400 	new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3401 		       VELOCITY_DUPLEX_FULL : 0);
3402 
3403 	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3404 	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3405 		ret = -EINVAL;
3406 	} else {
3407 		enum speed_opt spd_dpx;
3408 
3409 		if (new_status & VELOCITY_AUTONEG_ENABLE)
3410 			spd_dpx = SPD_DPX_AUTO;
3411 		else if ((new_status & VELOCITY_SPEED_1000) &&
3412 			 (new_status & VELOCITY_DUPLEX_FULL)) {
3413 			spd_dpx = SPD_DPX_1000_FULL;
3414 		} else if (new_status & VELOCITY_SPEED_100)
3415 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3416 				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3417 		else if (new_status & VELOCITY_SPEED_10)
3418 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3419 				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3420 		else
3421 			return -EOPNOTSUPP;
3422 
3423 		vptr->options.spd_dpx = spd_dpx;
3424 
3425 		velocity_set_media_mode(vptr, new_status);
3426 	}
3427 
3428 	return ret;
3429 }
3430 
3431 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3432 {
3433 	struct velocity_info *vptr = netdev_priv(dev);
3434 
3435 	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3436 	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3437 	if (vptr->pdev)
3438 		strlcpy(info->bus_info, pci_name(vptr->pdev),
3439 						sizeof(info->bus_info));
3440 	else
3441 		strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3442 }
3443 
3444 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3445 {
3446 	struct velocity_info *vptr = netdev_priv(dev);
3447 	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3448 	wol->wolopts |= WAKE_MAGIC;
3449 	/*
3450 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3451 		   wol.wolopts|=WAKE_PHY;
3452 			 */
3453 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3454 		wol->wolopts |= WAKE_UCAST;
3455 	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3456 		wol->wolopts |= WAKE_ARP;
3457 	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3458 }
3459 
3460 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3461 {
3462 	struct velocity_info *vptr = netdev_priv(dev);
3463 
3464 	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3465 		return -EFAULT;
3466 	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3467 
3468 	/*
3469 	   if (wol.wolopts & WAKE_PHY) {
3470 	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3471 	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3472 	   }
3473 	 */
3474 
3475 	if (wol->wolopts & WAKE_MAGIC) {
3476 		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3477 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3478 	}
3479 	if (wol->wolopts & WAKE_UCAST) {
3480 		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3481 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3482 	}
3483 	if (wol->wolopts & WAKE_ARP) {
3484 		vptr->wol_opts |= VELOCITY_WOL_ARP;
3485 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3486 	}
3487 	memcpy(vptr->wol_passwd, wol->sopass, 6);
3488 	return 0;
3489 }
3490 
3491 static int get_pending_timer_val(int val)
3492 {
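	/* Bits 6-7 of the register select a 1x/4x/16x/64x multiplier; bits
	 * 0-5 hold the raw count.
	 */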
3493 	int mult_bits = val >> 6;
3494 	int mult = 1;
3495 
3496 	switch (mult_bits)
3497 	{
3498 	case 1:
3499 		mult = 4; break;
3500 	case 2:
3501 		mult = 16; break;
3502 	case 3:
3503 		mult = 64; break;
3504 	case 0:
3505 	default:
3506 		break;
3507 	}
3508 
3509 	return (val & 0x3f) * mult;
3510 }
3511 
3512 static void set_pending_timer_val(int *val, u32 us)
3513 {
3514 	u8 mult = 0;
3515 	u8 shift = 0;
3516 
	if (us >= 0x3f) {
		mult = 1; /* multiply by 4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2; /* multiply by 16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3; /* multiply by 64 */
		shift = 6;
	}
3529 
3530 	*val = (mult << 6) | ((us >> shift) & 0x3f);
3531 }
3532 
3533 
3534 static int velocity_get_coalesce(struct net_device *dev,
3535 		struct ethtool_coalesce *ecmd)
3536 {
3537 	struct velocity_info *vptr = netdev_priv(dev);
3538 
3539 	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3540 	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3541 
3542 	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3543 	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3544 
3545 	return 0;
3546 }
3547 
3548 static int velocity_set_coalesce(struct net_device *dev,
3549 		struct ethtool_coalesce *ecmd)
3550 {
3551 	struct velocity_info *vptr = netdev_priv(dev);
3552 	int max_us = 0x3f * 64;
3553 	unsigned long flags;
3554 
	/* The hardware timers hold 6 bits of value, scaled by at most 64 */
3556 	if (ecmd->tx_coalesce_usecs > max_us)
3557 		return -EINVAL;
3558 	if (ecmd->rx_coalesce_usecs > max_us)
3559 		return -EINVAL;
3560 
3561 	if (ecmd->tx_max_coalesced_frames > 0xff)
3562 		return -EINVAL;
3563 	if (ecmd->rx_max_coalesced_frames > 0xff)
3564 		return -EINVAL;
3565 
3566 	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3567 	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3568 
3569 	set_pending_timer_val(&vptr->options.rxqueue_timer,
3570 			ecmd->rx_coalesce_usecs);
3571 	set_pending_timer_val(&vptr->options.txqueue_timer,
3572 			ecmd->tx_coalesce_usecs);
3573 
3574 	/* Setup the interrupt suppression and queue timers */
3575 	spin_lock_irqsave(&vptr->lock, flags);
3576 	mac_disable_int(vptr->mac_regs);
3577 	setup_adaptive_interrupts(vptr);
3578 	setup_queue_timers(vptr);
3579 
3580 	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3581 	mac_clear_isr(vptr->mac_regs);
3582 	mac_enable_int(vptr->mac_regs);
3583 	spin_unlock_irqrestore(&vptr->lock, flags);
3584 
3585 	return 0;
3586 }
3587 
3588 static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3589 	"rx_all",
3590 	"rx_ok",
3591 	"tx_ok",
3592 	"rx_error",
3593 	"rx_runt_ok",
3594 	"rx_runt_err",
3595 	"rx_64",
3596 	"tx_64",
3597 	"rx_65_to_127",
3598 	"tx_65_to_127",
3599 	"rx_128_to_255",
3600 	"tx_128_to_255",
3601 	"rx_256_to_511",
3602 	"tx_256_to_511",
3603 	"rx_512_to_1023",
3604 	"tx_512_to_1023",
3605 	"rx_1024_to_1518",
3606 	"tx_1024_to_1518",
3607 	"tx_ether_collisions",
3608 	"rx_crc_errors",
3609 	"rx_jumbo",
3610 	"tx_jumbo",
3611 	"rx_mac_control_frames",
3612 	"tx_mac_control_frames",
3613 	"rx_frame_alignment_errors",
3614 	"rx_long_ok",
3615 	"rx_long_err",
3616 	"tx_sqe_errors",
3617 	"rx_no_buf",
3618 	"rx_symbol_errors",
3619 	"in_range_length_errors",
3620 	"late_collisions"
3621 };
3622 
3623 static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3624 {
3625 	switch (sset) {
3626 	case ETH_SS_STATS:
3627 		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3628 		break;
3629 	}
3630 }
3631 
3632 static int velocity_get_sset_count(struct net_device *dev, int sset)
3633 {
3634 	switch (sset) {
3635 	case ETH_SS_STATS:
3636 		return ARRAY_SIZE(velocity_gstrings);
3637 	default:
3638 		return -EOPNOTSUPP;
3639 	}
3640 }
3641 
3642 static void velocity_get_ethtool_stats(struct net_device *dev,
3643 				       struct ethtool_stats *stats, u64 *data)
3644 {
3645 	if (netif_running(dev)) {
3646 		struct velocity_info *vptr = netdev_priv(dev);
3647 		u32 *p = vptr->mib_counter;
3648 		int i;
3649 
3650 		spin_lock_irq(&vptr->lock);
3651 		velocity_update_hw_mibs(vptr);
3652 		spin_unlock_irq(&vptr->lock);
3653 
3654 		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3655 			*data++ = *p++;
3656 	}
3657 }
3658 
3659 static const struct ethtool_ops velocity_ethtool_ops = {
3660 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3661 				     ETHTOOL_COALESCE_MAX_FRAMES,
3662 	.get_drvinfo		= velocity_get_drvinfo,
3663 	.get_wol		= velocity_ethtool_get_wol,
3664 	.set_wol		= velocity_ethtool_set_wol,
3665 	.get_link		= velocity_get_link,
3666 	.get_strings		= velocity_get_strings,
3667 	.get_sset_count		= velocity_get_sset_count,
3668 	.get_ethtool_stats	= velocity_get_ethtool_stats,
3669 	.get_coalesce		= velocity_get_coalesce,
3670 	.set_coalesce		= velocity_set_coalesce,
3671 	.begin			= velocity_ethtool_up,
3672 	.complete		= velocity_ethtool_down,
3673 	.get_link_ksettings	= velocity_get_link_ksettings,
3674 	.set_link_ksettings	= velocity_set_link_ksettings,
3675 };
3676 
3677 #if defined(CONFIG_PM) && defined(CONFIG_INET)
3678 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3679 {
3680 	struct in_ifaddr *ifa = ptr;
3681 	struct net_device *dev = ifa->ifa_dev->dev;
3682 
3683 	if (dev_net(dev) == &init_net &&
3684 	    dev->netdev_ops == &velocity_netdev_ops)
3685 		velocity_get_ip(netdev_priv(dev));
3686 
3687 	return NOTIFY_DONE;
3688 }
3689 
3690 static struct notifier_block velocity_inetaddr_notifier = {
3691 	.notifier_call	= velocity_netdev_event,
3692 };
3693 
3694 static void velocity_register_notifier(void)
3695 {
3696 	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3697 }
3698 
3699 static void velocity_unregister_notifier(void)
3700 {
3701 	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3702 }
3703 
3704 #else
3705 
3706 #define velocity_register_notifier()	do {} while (0)
3707 #define velocity_unregister_notifier()	do {} while (0)
3708 
3709 #endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3710 
3711 /**
3712  *	velocity_init_module	-	load time function
3713  *
 *	Called when the velocity module is loaded. The PCI and platform
 *	drivers are registered with their bus layers, which in turn call
 *	the probe functions for each velocity adapter installed in the
 *	system.
3718  */
3719 static int __init velocity_init_module(void)
3720 {
3721 	int ret_pci, ret_platform;
3722 
3723 	velocity_register_notifier();
3724 
3725 	ret_pci = pci_register_driver(&velocity_pci_driver);
3726 	ret_platform = platform_driver_register(&velocity_platform_driver);
3727 
	/* if both registrations failed, remove the notifier */
3729 	if ((ret_pci < 0) && (ret_platform < 0)) {
3730 		velocity_unregister_notifier();
3731 		return ret_pci;
3732 	}
3733 
3734 	return 0;
3735 }
3736 
3737 /**
3738  *	velocity_cleanup	-	module unload
3739  *
 *	Called when the velocity module is unloaded. It cleans up the
 *	notifiers and unregisters the PCI and platform driver interfaces.
 *	This in turn cleans up all discovered interfaces before returning
 *	from the function.
3744  */
3745 static void __exit velocity_cleanup_module(void)
3746 {
3747 	velocity_unregister_notifier();
3748 
3749 	pci_unregister_driver(&velocity_pci_driver);
3750 	platform_driver_unregister(&velocity_platform_driver);
3751 }
3752 
3753 module_init(velocity_init_module);
3754 module_exit(velocity_cleanup_module);
3755