1 /*
2  * This code is derived from the VIA reference driver (copyright message
3  * below) provided to Red Hat by VIA Networking Technologies, Inc. for
4  * addition to the Linux kernel.
5  *
6  * The code has been merged into one source file, cleaned up to follow
7  * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
8  * for 64bit hardware platforms.
9  *
10  * TODO
11  *	rx_copybreak/alignment
12  *	More testing
13  *
14  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
15  * Additional fixes and clean up: Francois Romieu
16  *
17  * This source has not been verified for use in safety critical systems.
18  *
19  * Please direct queries about the revamped driver to the linux-kernel
20  * list not VIA.
21  *
22  * Original code:
23  *
24  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
25  * All rights reserved.
26  *
27  * This software may be redistributed and/or modified under
28  * the terms of the GNU General Public License as published by the Free
29  * Software Foundation; either version 2 of the License, or
30  * any later version.
31  *
32  * This program is distributed in the hope that it will be useful, but
33  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
34  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
35  * for more details.
36  *
37  * Author: Chuang Liang-Shing, AJ Jiang
38  *
39  * Date: Jan 24, 2003
40  *
41  * MODULE_LICENSE("GPL");
42  *
43  */
44 
45 #include <linux/module.h>
46 #include <linux/types.h>
47 #include <linux/bitops.h>
48 #include <linux/init.h>
49 #include <linux/mm.h>
50 #include <linux/errno.h>
51 #include <linux/ioport.h>
52 #include <linux/pci.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/delay.h>
58 #include <linux/timer.h>
59 #include <linux/slab.h>
60 #include <linux/interrupt.h>
61 #include <linux/string.h>
62 #include <linux/wait.h>
63 #include <linux/io.h>
64 #include <linux/if.h>
65 #include <linux/uaccess.h>
66 #include <linux/proc_fs.h>
67 #include <linux/inetdevice.h>
68 #include <linux/reboot.h>
69 #include <linux/ethtool.h>
70 #include <linux/mii.h>
71 #include <linux/in.h>
72 #include <linux/if_arp.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/udp.h>
77 #include <linux/crc-ccitt.h>
78 #include <linux/crc32.h>
79 
80 #include "via-velocity.h"
81 
82 
83 static int velocity_nics;
84 static int msglevel = MSG_LEVEL_INFO;
85 
86 /**
87  *	mac_get_cam_mask	-	Read a CAM mask
88  *	@regs: register block for this velocity
89  *	@mask: buffer to store mask
90  *
91  *	Fetch the mask bits of the selected CAM and store them into the
92  *	provided mask buffer.
93  */
94 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
95 {
96 	int i;
97 
98 	/* Select CAM mask */
99 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
100 
101 	writeb(0, &regs->CAMADDR);
102 
103 	/* read mask */
104 	for (i = 0; i < 8; i++)
105 		*mask++ = readb(&(regs->MARCAM[i]));
106 
107 	/* disable CAMEN */
108 	writeb(0, &regs->CAMADDR);
109 
110 	/* Select mar */
111 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
112 }
113 
114 /**
115  *	mac_set_cam_mask	-	Set a CAM mask
116  *	@regs: register block for this velocity
117  *	@mask: CAM mask to load
118  *
119  *	Store a new mask into a CAM
120  */
121 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
122 {
123 	int i;
124 	/* Select CAM mask */
125 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
126 
127 	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
128 
129 	for (i = 0; i < 8; i++)
130 		writeb(*mask++, &(regs->MARCAM[i]));
131 
132 	/* disable CAMEN */
133 	writeb(0, &regs->CAMADDR);
134 
135 	/* Select mar */
136 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
137 }
138 
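/**
 *	mac_set_vlan_cam_mask	-	Set a VLAN CAM mask
 *	@regs: register block for this velocity
 *	@mask: VLAN CAM mask to load
 *
 *	Store a new mask into the VLAN filter CAM
 */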
139 static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
140 {
141 	int i;
142 	/* Select CAM mask */
143 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
144 
145 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
146 
147 	for (i = 0; i < 8; i++)
148 		writeb(*mask++, &(regs->MARCAM[i]));
149 
150 	/* disable CAMEN */
151 	writeb(0, &regs->CAMADDR);
152 
153 	/* Select mar */
154 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
155 }
156 
157 /**
158  *	mac_set_cam	-	set CAM data
159  *	@regs: register block of this velocity
160  *	@idx: Cam index
 *	@addr: 6 bytes of CAM data (MAC address)
 *
 *	Load a MAC address into a CAM
164  */
165 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
166 {
167 	int i;
168 
169 	/* Select CAM mask */
170 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
171 
172 	idx &= (64 - 1);
173 
174 	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
175 
176 	for (i = 0; i < 6; i++)
177 		writeb(*addr++, &(regs->MARCAM[i]));
178 
179 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
180 
181 	udelay(10);
182 
183 	writeb(0, &regs->CAMADDR);
184 
185 	/* Select mar */
186 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
187 }
188 
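/**
 *	mac_set_vlan_cam	-	set VLAN CAM data
 *	@regs: register block of this velocity
 *	@idx: CAM index
 *	@addr: 2 byte VLAN tag to load
 *
 *	Load a VLAN tag into a VLAN filter CAM slot
 */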
189 static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
190 			     const u8 *addr)
191 {
192 
193 	/* Select CAM mask */
194 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
195 
196 	idx &= (64 - 1);
197 
198 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
199 	writew(*((u16 *) addr), &regs->MARCAM[0]);
200 
201 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
202 
203 	udelay(10);
204 
205 	writeb(0, &regs->CAMADDR);
206 
207 	/* Select mar */
208 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
209 }
210 
211 
212 /**
213  *	mac_wol_reset	-	reset WOL after exiting low power
214  *	@regs: register block of this velocity
215  *
 *	Called after we drop out of wake on lan mode in order to
 *	reset the wake on lan features. This function doesn't restore
 *	the rest of the chip state affected by the sleep/wakeup cycle.
219  */
220 static void mac_wol_reset(struct mac_regs __iomem *regs)
221 {
222 
223 	/* Turn off SWPTAG right after leaving power mode */
224 	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
225 	/* clear sticky bits */
226 	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
227 
228 	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
229 	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
230 	/* disable force PME-enable */
231 	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
232 	/* disable power-event config bit */
233 	writew(0xFFFF, &regs->WOLCRClr);
234 	/* clear power status */
235 	writew(0xFFFF, &regs->WOLSRClr);
236 }
237 
238 static const struct ethtool_ops velocity_ethtool_ops;
239 
240 /*
241     Define module options
242 */
243 
244 MODULE_AUTHOR("VIA Networking Technologies, Inc.");
245 MODULE_LICENSE("GPL");
246 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
247 
248 #define VELOCITY_PARAM(N, D) \
249 	static int N[MAX_UNITS] = OPTION_DEFAULT;\
250 	module_param_array(N, int, NULL, 0); \
251 	MODULE_PARM_DESC(N, D);
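/*
 *	For illustration only: VELOCITY_PARAM(RxDescriptors, "Number of receive
 *	descriptors") expands to roughly
 *
 *		static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *		module_param_array(RxDescriptors, int, NULL, 0);
 *		MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 *	so each option below is a per-unit array settable at module load time.
 */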
252 
253 #define RX_DESC_MIN     64
254 #define RX_DESC_MAX     255
255 #define RX_DESC_DEF     64
256 VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
257 
258 #define TX_DESC_MIN     16
259 #define TX_DESC_MAX     256
260 #define TX_DESC_DEF     64
261 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
262 
263 #define RX_THRESH_MIN   0
264 #define RX_THRESH_MAX   3
265 #define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: the rx fifo threshold is 128 bytes.
   1: the rx fifo threshold is 512 bytes.
   2: the rx fifo threshold is 1024 bytes.
   3: store & forward.
271 */
272 VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
273 
274 #define DMA_LENGTH_MIN  0
275 #define DMA_LENGTH_MAX  7
276 #define DMA_LENGTH_DEF  6
277 
278 /* DMA_length[] is used for controlling the DMA length
279    0: 8 DWORDs
280    1: 16 DWORDs
281    2: 32 DWORDs
282    3: 64 DWORDs
283    4: 128 DWORDs
284    5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
287 */
288 VELOCITY_PARAM(DMA_length, "DMA length");
289 
290 #define IP_ALIG_DEF     0
/* IP_byte_align[] is used to control DWORD alignment of the IP header.
   0: the IP header will not be DWORD byte aligned. (Default)
   1: the IP header will be DWORD byte aligned.
      In some environments the IP header must be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
296 */
297 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
298 
299 #define FLOW_CNTL_DEF   1
300 #define FLOW_CNTL_MIN   1
301 #define FLOW_CNTL_MAX   5
302 
/* flow_control[] is used for setting the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable flow control.
309 */
310 VELOCITY_PARAM(flow_control, "Enable flow control ability");
311 
312 #define MED_LNK_DEF 0
313 #define MED_LNK_MIN 0
314 #define MED_LNK_MAX 5
315 /* speed_duplex[] is used for setting the speed and duplex mode of NIC.
316    0: indicate autonegotiation for both speed and duplex mode
317    1: indicate 100Mbps half duplex mode
318    2: indicate 100Mbps full duplex mode
319    3: indicate 10Mbps half duplex mode
320    4: indicate 10Mbps full duplex mode
321    5: indicate 1000Mbps full duplex mode
322 
323    Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
326 */
327 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
328 
329 #define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used to control handling of frames with an invalid layer 2 length.
   0: Receive frames with an invalid layer 2 length (Default)
   1: Drop frames with an invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop frames with invalid 802.3 length");
335 
336 #define WOL_OPT_DEF     0
337 #define WOL_OPT_MIN     0
338 #define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes.
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed up to enable more than one option.
345 */
346 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
347 
348 static int rx_copybreak = 200;
349 module_param(rx_copybreak, int, 0644);
350 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
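/*
 *	Example (illustrative values only): the per-unit options above can be
 *	given on the module command line, e.g.
 *
 *		modprobe via-velocity RxDescriptors=128,128 rx_thresh=3 wol_opts=3
 *
 *	where wol_opts=3 combines the link status (1) and arp packet (2)
 *	wake events.
 */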
351 
352 /*
353  *	Internal board variants. At the moment we have only one
354  */
355 static struct velocity_info_tbl chip_info_table[] = {
356 	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
357 	{ }
358 };
359 
360 /*
361  *	Describe the PCI device identifiers that we support in this
362  *	device driver. Used for hotplug autoloading.
363  */
364 static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
365 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 	{ }
367 };
368 
369 MODULE_DEVICE_TABLE(pci, velocity_id_table);
370 
371 /**
 *	get_chip_name	-	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
377  */
378 static const char *get_chip_name(enum chip_type chip_id)
379 {
380 	int i;
381 	for (i = 0; chip_info_table[i].name != NULL; i++)
382 		if (chip_info_table[i].chip_id == chip_id)
383 			break;
384 	return chip_info_table[i].name;
385 }
386 
387 /**
388  *	velocity_remove1	-	device unplug
389  *	@pdev: PCI device being removed
390  *
391  *	Device unload callback. Called on an unplug or on module
392  *	unload for each active device that is present. Disconnects
393  *	the device from the network layer and frees all the resources
394  */
395 static void velocity_remove1(struct pci_dev *pdev)
396 {
397 	struct net_device *dev = pci_get_drvdata(pdev);
398 	struct velocity_info *vptr = netdev_priv(dev);
399 
400 	unregister_netdev(dev);
401 	iounmap(vptr->mac_regs);
402 	pci_release_regions(pdev);
403 	pci_disable_device(pdev);
404 	pci_set_drvdata(pdev, NULL);
405 	free_netdev(dev);
406 
407 	velocity_nics--;
408 }
409 
410 /**
411  *	velocity_set_int_opt	-	parser for integer options
412  *	@opt: pointer to option value
413  *	@val: value the user requested (or -1 for default)
414  *	@min: lowest value allowed
415  *	@max: highest value allowed
416  *	@def: default value
417  *	@name: property name
 *	@devname: device name
419  *
420  *	Set an integer property in the module options. This function does
421  *	all the verification and checking as well as reporting so that
422  *	we don't duplicate code for each option.
423  */
424 static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
425 				 char *name, const char *devname)
426 {
427 	if (val == -1)
428 		*opt = def;
429 	else if (val < min || val > max) {
430 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
431 					devname, name, min, max);
432 		*opt = def;
433 	} else {
434 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
435 					devname, name, val);
436 		*opt = val;
437 	}
438 }
439 
440 /**
441  *	velocity_set_bool_opt	-	parser for boolean options
442  *	@opt: pointer to option value
443  *	@val: value the user requested (or -1 for default)
444  *	@def: default value (yes/no)
445  *	@flag: numeric value to set for true.
446  *	@name: property name
 *	@devname: device name
448  *
449  *	Set a boolean property in the module options. This function does
450  *	all the verification and checking as well as reporting so that
451  *	we don't duplicate code for each option.
452  */
453 static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
454 				  char *name, const char *devname)
455 {
456 	(*opt) &= (~flag);
457 	if (val == -1)
458 		*opt |= (def ? flag : 0);
459 	else if (val < 0 || val > 1) {
460 		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
461 			devname, name);
462 		*opt |= (def ? flag : 0);
463 	} else {
464 		printk(KERN_INFO "%s: set parameter %s to %s\n",
465 			devname, name, val ? "TRUE" : "FALSE");
466 		*opt |= (val ? flag : 0);
467 	}
468 }
469 
470 /**
471  *	velocity_get_options	-	set options on device
472  *	@opts: option structure for the device
473  *	@index: index of option to use in module options array
474  *	@devname: device name
475  *
476  *	Turn the module and command options into a single structure
477  *	for the current device
478  */
479 static void velocity_get_options(struct velocity_opt *opts, int index,
480 				 const char *devname)
481 {
482 
483 	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
484 	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
485 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
486 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
487 
488 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
489 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
490 	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
491 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
492 	velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
493 	opts->numrx = (opts->numrx & ~3);
494 }
495 
496 /**
497  *	velocity_init_cam_filter	-	initialise CAM
498  *	@vptr: velocity to program
499  *
500  *	Initialize the content addressable memory used for filters. Load
501  *	appropriately according to the presence of VLAN
502  */
503 static void velocity_init_cam_filter(struct velocity_info *vptr)
504 {
505 	struct mac_regs __iomem *regs = vptr->mac_regs;
506 	unsigned int vid, i = 0;
507 
508 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
509 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
510 	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
511 
512 	/* Disable all CAMs */
513 	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
514 	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
515 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
516 	mac_set_cam_mask(regs, vptr->mCAMmask);
517 
518 	/* Enable VCAMs */
519 	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
520 		mac_set_vlan_cam(regs, i, (u8 *) &vid);
521 		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
522 		if (++i >= VCAM_SIZE)
523 			break;
524 	}
525 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
526 }
527 
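/**
 *	velocity_vlan_rx_add_vid	-	add a VLAN id
 *	@dev: network device
 *	@vid: VLAN id being added
 *
 *	Called by the network layer when a VLAN id is registered.
 *	Record the id and reload the CAM filter.
 */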
528 static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
529 {
530 	struct velocity_info *vptr = netdev_priv(dev);
531 
532 	spin_lock_irq(&vptr->lock);
533 	set_bit(vid, vptr->active_vlans);
534 	velocity_init_cam_filter(vptr);
535 	spin_unlock_irq(&vptr->lock);
536 	return 0;
537 }
538 
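/**
 *	velocity_vlan_rx_kill_vid	-	remove a VLAN id
 *	@dev: network device
 *	@vid: VLAN id being removed
 *
 *	Called by the network layer when a VLAN id is unregistered.
 *	Forget the id and reload the CAM filter.
 */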
539 static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
540 {
541 	struct velocity_info *vptr = netdev_priv(dev);
542 
543 	spin_lock_irq(&vptr->lock);
544 	clear_bit(vid, vptr->active_vlans);
545 	velocity_init_cam_filter(vptr);
546 	spin_unlock_irq(&vptr->lock);
547 	return 0;
548 }
549 
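/**
 *	velocity_init_rx_ring_indexes	-	reset RX ring bookkeeping
 *	@vptr: velocity adapter
 *
 *	Reset the dirty, filled and current indexes of the receive ring.
 */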
550 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
551 {
552 	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
553 }
554 
555 /**
556  *	velocity_rx_reset	-	handle a receive reset
557  *	@vptr: velocity we are resetting
558  *
559  *	Reset the ownership and status for the receive ring side.
 *	Hand the entire receive queue back to the NIC.
561  */
562 static void velocity_rx_reset(struct velocity_info *vptr)
563 {
564 
565 	struct mac_regs __iomem *regs = vptr->mac_regs;
566 	int i;
567 
568 	velocity_init_rx_ring_indexes(vptr);
569 
570 	/*
571 	 *	Init state, all RD entries belong to the NIC
572 	 */
573 	for (i = 0; i < vptr->options.numrx; ++i)
574 		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
575 
576 	writew(vptr->options.numrx, &regs->RBRDU);
577 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
578 	writew(0, &regs->RDIdx);
579 	writew(vptr->options.numrx - 1, &regs->RDCSize);
580 }
581 
582 /**
583  *	velocity_get_opt_media_mode	-	get media selection
584  *	@vptr: velocity adapter
585  *
586  *	Get the media mode stored in EEPROM or module options and load
587  *	mii_status accordingly. The requested link state information
588  *	is also returned.
589  */
590 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
591 {
592 	u32 status = 0;
593 
594 	switch (vptr->options.spd_dpx) {
595 	case SPD_DPX_AUTO:
596 		status = VELOCITY_AUTONEG_ENABLE;
597 		break;
598 	case SPD_DPX_100_FULL:
599 		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
600 		break;
601 	case SPD_DPX_10_FULL:
602 		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
603 		break;
604 	case SPD_DPX_100_HALF:
605 		status = VELOCITY_SPEED_100;
606 		break;
607 	case SPD_DPX_10_HALF:
608 		status = VELOCITY_SPEED_10;
609 		break;
610 	case SPD_DPX_1000_FULL:
611 		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
612 		break;
613 	}
614 	vptr->mii_status = status;
615 	return status;
616 }
617 
618 /**
619  *	safe_disable_mii_autopoll	-	autopoll off
620  *	@regs: velocity registers
621  *
622  *	Turn off the autopoll and wait for it to disable on the chip
623  */
624 static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
625 {
626 	u16 ww;
627 
628 	/*  turn off MAUTO */
629 	writeb(0, &regs->MIICR);
630 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
631 		udelay(1);
632 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
633 			break;
634 	}
635 }
636 
637 /**
638  *	enable_mii_autopoll	-	turn on autopolling
639  *	@regs: velocity registers
640  *
641  *	Enable the MII link status autopoll feature on the Velocity
642  *	hardware. Wait for it to enable.
643  */
644 static void enable_mii_autopoll(struct mac_regs __iomem *regs)
645 {
646 	int ii;
647 
648 	writeb(0, &(regs->MIICR));
649 	writeb(MIIADR_SWMPL, &regs->MIIADR);
650 
651 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
652 		udelay(1);
653 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
654 			break;
655 	}
656 
657 	writeb(MIICR_MAUTO, &regs->MIICR);
658 
659 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
660 		udelay(1);
661 		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
662 			break;
663 	}
664 
665 }
666 
667 /**
668  *	velocity_mii_read	-	read MII data
669  *	@regs: velocity registers
670  *	@index: MII register index
671  *	@data: buffer for received data
672  *
673  *	Perform a single read of an MII 16bit register. Returns zero
674  *	on success or -ETIMEDOUT if the PHY did not respond.
675  */
676 static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
677 {
678 	u16 ww;
679 
680 	/*
681 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
682 	 */
683 	safe_disable_mii_autopoll(regs);
684 
685 	writeb(index, &regs->MIIADR);
686 
687 	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
688 
689 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
690 		if (!(readb(&regs->MIICR) & MIICR_RCMD))
691 			break;
692 	}
693 
694 	*data = readw(&regs->MIIDATA);
695 
696 	enable_mii_autopoll(regs);
697 	if (ww == W_MAX_TIMEOUT)
698 		return -ETIMEDOUT;
699 	return 0;
700 }
701 
702 /**
703  *	mii_check_media_mode	-	check media state
704  *	@regs: velocity registers
705  *
706  *	Check the current MII status and determine the link status
707  *	accordingly
708  */
709 static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
710 {
711 	u32 status = 0;
712 	u16 ANAR;
713 
714 	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
715 		status |= VELOCITY_LINK_FAIL;
716 
717 	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
718 		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
719 	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
720 		status |= (VELOCITY_SPEED_1000);
721 	else {
722 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
723 		if (ANAR & ADVERTISE_100FULL)
724 			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
725 		else if (ANAR & ADVERTISE_100HALF)
726 			status |= VELOCITY_SPEED_100;
727 		else if (ANAR & ADVERTISE_10FULL)
728 			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
729 		else
730 			status |= (VELOCITY_SPEED_10);
731 	}
732 
733 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
734 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
735 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
736 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
737 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
738 				status |= VELOCITY_AUTONEG_ENABLE;
739 		}
740 	}
741 
742 	return status;
743 }
744 
745 /**
746  *	velocity_mii_write	-	write MII data
747  *	@regs: velocity registers
 *	@mii_addr: MII register index
749  *	@data: 16bit data for the MII register
750  *
751  *	Perform a single write to an MII 16bit register. Returns zero
752  *	on success or -ETIMEDOUT if the PHY did not respond.
753  */
754 static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
755 {
756 	u16 ww;
757 
758 	/*
759 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
760 	 */
761 	safe_disable_mii_autopoll(regs);
762 
763 	/* MII reg offset */
764 	writeb(mii_addr, &regs->MIIADR);
765 	/* set MII data */
766 	writew(data, &regs->MIIDATA);
767 
768 	/* turn on MIICR_WCMD */
769 	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
770 
771 	/* W_MAX_TIMEOUT is the timeout period */
772 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
773 		udelay(5);
774 		if (!(readb(&regs->MIICR) & MIICR_WCMD))
775 			break;
776 	}
777 	enable_mii_autopoll(regs);
778 
779 	if (ww == W_MAX_TIMEOUT)
780 		return -ETIMEDOUT;
781 	return 0;
782 }
783 
784 /**
785  *	set_mii_flow_control	-	flow control setup
786  *	@vptr: velocity interface
787  *
788  *	Set up the flow control on this interface according to
789  *	the supplied user/eeprom options.
790  */
791 static void set_mii_flow_control(struct velocity_info *vptr)
792 {
793 	/*Enable or Disable PAUSE in ANAR */
794 	switch (vptr->options.flow_cntl) {
795 	case FLOW_CNTL_TX:
796 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
797 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
798 		break;
799 
800 	case FLOW_CNTL_RX:
801 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
802 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
803 		break;
804 
805 	case FLOW_CNTL_TX_RX:
806 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
807 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
808 		break;
809 
810 	case FLOW_CNTL_DISABLE:
811 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
812 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
813 		break;
814 	default:
815 		break;
816 	}
817 }
818 
819 /**
820  *	mii_set_auto_on		-	autonegotiate on
821  *	@vptr: velocity
822  *
 *	Enable autonegotiation on this interface
824  */
825 static void mii_set_auto_on(struct velocity_info *vptr)
826 {
827 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
828 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
829 	else
830 		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
831 }
832 
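/**
 *	check_connection_type	-	check PHY link state
 *	@regs: velocity registers
 *
 *	Read PHYSR0 and the MII registers and work out the current
 *	speed, duplex and autonegotiation state of the link.
 */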
833 static u32 check_connection_type(struct mac_regs __iomem *regs)
834 {
835 	u32 status = 0;
836 	u8 PHYSR0;
837 	u16 ANAR;
838 	PHYSR0 = readb(&regs->PHYSR0);
839 
840 	/*
841 	   if (!(PHYSR0 & PHYSR0_LINKGD))
842 	   status|=VELOCITY_LINK_FAIL;
843 	 */
844 
845 	if (PHYSR0 & PHYSR0_FDPX)
846 		status |= VELOCITY_DUPLEX_FULL;
847 
848 	if (PHYSR0 & PHYSR0_SPDG)
849 		status |= VELOCITY_SPEED_1000;
850 	else if (PHYSR0 & PHYSR0_SPD10)
851 		status |= VELOCITY_SPEED_10;
852 	else
853 		status |= VELOCITY_SPEED_100;
854 
855 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
856 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
857 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
858 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
859 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
860 				status |= VELOCITY_AUTONEG_ENABLE;
861 		}
862 	}
863 
864 	return status;
865 }
866 
867 /**
 *	velocity_set_media_mode		-	set media mode
 *	@vptr: velocity adapter
 *	@mii_status: old MII link state
870  *
 *	Check the media link state and configure the PHY flow control
 *	and the velocity hardware setup accordingly. In particular
873  *	we need to set up CD polling and frame bursting.
874  */
875 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
876 {
877 	u32 curr_status;
878 	struct mac_regs __iomem *regs = vptr->mac_regs;
879 
880 	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
881 	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
882 
883 	/* Set mii link status */
884 	set_mii_flow_control(vptr);
885 
886 	/*
887 	   Check if new status is consistent with current status
888 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
889 	       (mii_status==curr_status)) {
890 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
891 	   vptr->mii_status=check_connection_type(vptr->mac_regs);
892 	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
893 	   return 0;
894 	   }
895 	 */
896 
897 	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
898 		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
899 
900 	/*
901 	 *	If connection type is AUTO
902 	 */
903 	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
904 		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
905 		/* clear force MAC mode bit */
906 		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
907 		/* set duplex mode of MAC according to duplex mode of MII */
908 		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
909 		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
910 		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
911 
912 		/* enable AUTO-NEGO mode */
913 		mii_set_auto_on(vptr);
914 	} else {
915 		u16 CTRL1000;
916 		u16 ANAR;
917 		u8 CHIPGCR;
918 
919 		/*
920 		 * 1. if it's 3119, disable frame bursting in halfduplex mode
921 		 *    and enable it in fullduplex mode
922 		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
923 		 * 3. only enable CD heart beat counter in 10HD mode
924 		 */
925 
926 		/* set force MAC mode bit */
927 		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
928 
929 		CHIPGCR = readb(&regs->CHIPGCR);
930 
931 		if (mii_status & VELOCITY_SPEED_1000)
932 			CHIPGCR |= CHIPGCR_FCGMII;
933 		else
934 			CHIPGCR &= ~CHIPGCR_FCGMII;
935 
936 		if (mii_status & VELOCITY_DUPLEX_FULL) {
937 			CHIPGCR |= CHIPGCR_FCFDX;
938 			writeb(CHIPGCR, &regs->CHIPGCR);
939 			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
940 			if (vptr->rev_id < REV_ID_VT3216_A0)
941 				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
942 		} else {
943 			CHIPGCR &= ~CHIPGCR_FCFDX;
944 			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
945 			writeb(CHIPGCR, &regs->CHIPGCR);
946 			if (vptr->rev_id < REV_ID_VT3216_A0)
947 				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
948 		}
949 
950 		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
951 		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
952 		if ((mii_status & VELOCITY_SPEED_1000) &&
953 		    (mii_status & VELOCITY_DUPLEX_FULL)) {
954 			CTRL1000 |= ADVERTISE_1000FULL;
955 		}
956 		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
957 
958 		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
959 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
960 		else
961 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
962 
963 		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
964 		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
965 		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
966 		if (mii_status & VELOCITY_SPEED_100) {
967 			if (mii_status & VELOCITY_DUPLEX_FULL)
968 				ANAR |= ADVERTISE_100FULL;
969 			else
970 				ANAR |= ADVERTISE_100HALF;
971 		} else if (mii_status & VELOCITY_SPEED_10) {
972 			if (mii_status & VELOCITY_DUPLEX_FULL)
973 				ANAR |= ADVERTISE_10FULL;
974 			else
975 				ANAR |= ADVERTISE_10HALF;
976 		}
977 		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
978 		/* enable AUTO-NEGO mode */
979 		mii_set_auto_on(vptr);
980 		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
981 	}
982 	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
983 	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
984 	return VELOCITY_LINK_CHANGE;
985 }
986 
987 /**
988  *	velocity_print_link_status	-	link status reporting
989  *	@vptr: velocity to report on
990  *
991  *	Turn the link status of the velocity card into a kernel log
992  *	description of the new link state, detailing speed and duplex
993  *	status
994  */
995 static void velocity_print_link_status(struct velocity_info *vptr)
996 {
997 
998 	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
999 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
1000 	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1001 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1002 
1003 		if (vptr->mii_status & VELOCITY_SPEED_1000)
1004 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1005 		else if (vptr->mii_status & VELOCITY_SPEED_100)
1006 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1007 		else
1008 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1009 
1010 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1011 			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1012 		else
1013 			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1014 	} else {
1015 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1016 		switch (vptr->options.spd_dpx) {
1017 		case SPD_DPX_1000_FULL:
1018 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
1019 			break;
1020 		case SPD_DPX_100_HALF:
1021 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1022 			break;
1023 		case SPD_DPX_100_FULL:
1024 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1025 			break;
1026 		case SPD_DPX_10_HALF:
1027 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1028 			break;
1029 		case SPD_DPX_10_FULL:
1030 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1031 			break;
1032 		default:
1033 			break;
1034 		}
1035 	}
1036 }
1037 
1038 /**
1039  *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
1041  *
1042  *	Set up flow control according to the flow control options
1043  *	determined by the eeprom/configuration.
1044  */
1045 static void enable_flow_control_ability(struct velocity_info *vptr)
1046 {
1047 
1048 	struct mac_regs __iomem *regs = vptr->mac_regs;
1049 
1050 	switch (vptr->options.flow_cntl) {
1051 
1052 	case FLOW_CNTL_DEFAULT:
1053 		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1054 			writel(CR0_FDXRFCEN, &regs->CR0Set);
1055 		else
1056 			writel(CR0_FDXRFCEN, &regs->CR0Clr);
1057 
1058 		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1059 			writel(CR0_FDXTFCEN, &regs->CR0Set);
1060 		else
1061 			writel(CR0_FDXTFCEN, &regs->CR0Clr);
1062 		break;
1063 
1064 	case FLOW_CNTL_TX:
1065 		writel(CR0_FDXTFCEN, &regs->CR0Set);
1066 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1067 		break;
1068 
1069 	case FLOW_CNTL_RX:
1070 		writel(CR0_FDXRFCEN, &regs->CR0Set);
1071 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1072 		break;
1073 
1074 	case FLOW_CNTL_TX_RX:
1075 		writel(CR0_FDXTFCEN, &regs->CR0Set);
1076 		writel(CR0_FDXRFCEN, &regs->CR0Set);
1077 		break;
1078 
1079 	case FLOW_CNTL_DISABLE:
1080 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1081 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1082 		break;
1083 
1084 	default:
1085 		break;
1086 	}
1087 
1088 }
1089 
1090 /**
1091  *	velocity_soft_reset	-	soft reset
1092  *	@vptr: velocity to reset
1093  *
1094  *	Kick off a soft reset of the velocity adapter and then poll
1095  *	until the reset sequence has completed before returning.
1096  */
1097 static int velocity_soft_reset(struct velocity_info *vptr)
1098 {
1099 	struct mac_regs __iomem *regs = vptr->mac_regs;
1100 	int i = 0;
1101 
1102 	writel(CR0_SFRST, &regs->CR0Set);
1103 
1104 	for (i = 0; i < W_MAX_TIMEOUT; i++) {
1105 		udelay(5);
1106 		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1107 			break;
1108 	}
1109 
1110 	if (i == W_MAX_TIMEOUT) {
1111 		writel(CR0_FORSRST, &regs->CR0Set);
1112 		/* FIXME: PCI POSTING */
1113 		/* delay 2ms */
1114 		mdelay(2);
1115 	}
1116 	return 0;
1117 }
1118 
1119 /**
1120  *	velocity_set_multi	-	filter list change callback
1121  *	@dev: network device
1122  *
1123  *	Called by the network layer when the filter lists need to change
1124  *	for a velocity adapter. Reload the CAMs with the new address
1125  *	filter ruleset.
1126  */
1127 static void velocity_set_multi(struct net_device *dev)
1128 {
1129 	struct velocity_info *vptr = netdev_priv(dev);
1130 	struct mac_regs __iomem *regs = vptr->mac_regs;
1131 	u8 rx_mode;
1132 	int i;
1133 	struct netdev_hw_addr *ha;
1134 
1135 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1136 		writel(0xffffffff, &regs->MARCAM[0]);
1137 		writel(0xffffffff, &regs->MARCAM[4]);
1138 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1139 	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1140 		   (dev->flags & IFF_ALLMULTI)) {
1141 		writel(0xffffffff, &regs->MARCAM[0]);
1142 		writel(0xffffffff, &regs->MARCAM[4]);
1143 		rx_mode = (RCR_AM | RCR_AB);
1144 	} else {
1145 		int offset = MCAM_SIZE - vptr->multicast_limit;
1146 		mac_get_cam_mask(regs, vptr->mCAMmask);
1147 
1148 		i = 0;
1149 		netdev_for_each_mc_addr(ha, dev) {
1150 			mac_set_cam(regs, i + offset, ha->addr);
1151 			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1152 			i++;
1153 		}
1154 
1155 		mac_set_cam_mask(regs, vptr->mCAMmask);
1156 		rx_mode = RCR_AM | RCR_AB | RCR_AP;
1157 	}
1158 	if (dev->mtu > 1500)
1159 		rx_mode |= RCR_AL;
1160 
1161 	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1162 
1163 }
1164 
1165 /*
 * MII access, media link mode setting functions
1167  */
1168 
1169 /**
1170  *	mii_init	-	set up MII
1171  *	@vptr: velocity adapter
 *	@mii_status: link status
1173  *
1174  *	Set up the PHY for the current link state.
1175  */
1176 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1177 {
1178 	u16 BMCR;
1179 
1180 	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1181 	case PHYID_CICADA_CS8201:
1182 		/*
1183 		 *	Reset to hardware default
1184 		 */
1185 		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1186 		/*
		 *	Turn on the ECHODIS bit in NWay-forced full duplex mode and
		 *	turn it off in NWay-forced half duplex mode, to work around
		 *	the NWay-forced vs. legacy-forced issue.
1190 		 */
1191 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1192 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1193 		else
1194 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1195 		/*
1196 		 *	Turn on Link/Activity LED enable bit for CIS8201
1197 		 */
1198 		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1199 		break;
1200 	case PHYID_VT3216_32BIT:
1201 	case PHYID_VT3216_64BIT:
1202 		/*
1203 		 *	Reset to hardware default
1204 		 */
1205 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1206 		/*
		 *	Turn on the ECHODIS bit in NWay-forced full duplex mode and
		 *	turn it off in NWay-forced half duplex mode, to work around
		 *	the NWay-forced vs. legacy-forced issue.
1210 		 */
1211 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1212 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1213 		else
1214 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1215 		break;
1216 
1217 	case PHYID_MARVELL_1000:
1218 	case PHYID_MARVELL_1000S:
1219 		/*
1220 		 *	Assert CRS on Transmit
1221 		 */
1222 		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1223 		/*
1224 		 *	Reset to hardware default
1225 		 */
1226 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1227 		break;
1228 	default:
1229 		;
1230 	}
1231 	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1232 	if (BMCR & BMCR_ISOLATE) {
1233 		BMCR &= ~BMCR_ISOLATE;
1234 		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1235 	}
1236 }
1237 
1238 /**
1239  * setup_queue_timers	-	Setup interrupt timers
 * @vptr: velocity adapter
 *
1241  * Setup interrupt frequency during suppression (timeout if the frame
1242  * count isn't filled).
1243  */
1244 static void setup_queue_timers(struct velocity_info *vptr)
1245 {
1246 	/* Only for newer revisions */
1247 	if (vptr->rev_id >= REV_ID_VT3216_A0) {
1248 		u8 txqueue_timer = 0;
1249 		u8 rxqueue_timer = 0;
1250 
1251 		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1252 				VELOCITY_SPEED_100)) {
1253 			txqueue_timer = vptr->options.txqueue_timer;
1254 			rxqueue_timer = vptr->options.rxqueue_timer;
1255 		}
1256 
1257 		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1258 		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1259 	}
1260 }
1261 
1262 /**
1263  * setup_adaptive_interrupts  -  Setup interrupt suppression
1264  *
 * @vptr: velocity adapter
1266  *
 * The velocity is able to suppress interrupts during high interrupt load.
1268  * This function turns on that feature.
1269  */
1270 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1271 {
1272 	struct mac_regs __iomem *regs = vptr->mac_regs;
1273 	u16 tx_intsup = vptr->options.tx_intsup;
1274 	u16 rx_intsup = vptr->options.rx_intsup;
1275 
1276 	/* Setup default interrupt mask (will be changed below) */
1277 	vptr->int_mask = INT_MASK_DEF;
1278 
1279 	/* Set Tx Interrupt Suppression Threshold */
1280 	writeb(CAMCR_PS0, &regs->CAMCR);
1281 	if (tx_intsup != 0) {
1282 		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1283 				ISR_PTX2I | ISR_PTX3I);
1284 		writew(tx_intsup, &regs->ISRCTL);
1285 	} else
1286 		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1287 
1288 	/* Set Rx Interrupt Suppression Threshold */
1289 	writeb(CAMCR_PS1, &regs->CAMCR);
1290 	if (rx_intsup != 0) {
1291 		vptr->int_mask &= ~ISR_PRXI;
1292 		writew(rx_intsup, &regs->ISRCTL);
1293 	} else
1294 		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1295 
1296 	/* Select page to interrupt hold timer */
1297 	writeb(0, &regs->CAMCR);
1298 }
1299 
1300 /**
1301  *	velocity_init_registers	-	initialise MAC registers
1302  *	@vptr: velocity to init
1303  *	@type: type of initialisation (hot or cold)
1304  *
 *	Initialise the MAC on a reset or on first set up of the
 *	hardware.
1307  */
1308 static void velocity_init_registers(struct velocity_info *vptr,
1309 				    enum velocity_init_type type)
1310 {
1311 	struct mac_regs __iomem *regs = vptr->mac_regs;
1312 	int i, mii_status;
1313 
1314 	mac_wol_reset(regs);
1315 
1316 	switch (type) {
1317 	case VELOCITY_INIT_RESET:
1318 	case VELOCITY_INIT_WOL:
1319 
1320 		netif_stop_queue(vptr->dev);
1321 
1322 		/*
		 *	Reset RX to keep the RX pointer aligned on a 4-descriptor (4X) boundary
1324 		 */
1325 		velocity_rx_reset(vptr);
1326 		mac_rx_queue_run(regs);
1327 		mac_rx_queue_wake(regs);
1328 
1329 		mii_status = velocity_get_opt_media_mode(vptr);
1330 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1331 			velocity_print_link_status(vptr);
1332 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1333 				netif_wake_queue(vptr->dev);
1334 		}
1335 
1336 		enable_flow_control_ability(vptr);
1337 
1338 		mac_clear_isr(regs);
1339 		writel(CR0_STOP, &regs->CR0Clr);
1340 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1341 							&regs->CR0Set);
1342 
1343 		break;
1344 
1345 	case VELOCITY_INIT_COLD:
1346 	default:
1347 		/*
1348 		 *	Do reset
1349 		 */
1350 		velocity_soft_reset(vptr);
1351 		mdelay(5);
1352 
1353 		mac_eeprom_reload(regs);
1354 		for (i = 0; i < 6; i++)
1355 			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
1356 
1357 		/*
1358 		 *	clear Pre_ACPI bit.
1359 		 */
1360 		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1361 		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1362 		mac_set_dma_length(regs, vptr->options.DMA_length);
1363 
1364 		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1365 		/*
1366 		 *	Back off algorithm use original IEEE standard
1367 		 */
1368 		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1369 
1370 		/*
1371 		 *	Init CAM filter
1372 		 */
1373 		velocity_init_cam_filter(vptr);
1374 
1375 		/*
1376 		 *	Set packet filter: Receive directed and broadcast address
1377 		 */
1378 		velocity_set_multi(vptr->dev);
1379 
1380 		/*
1381 		 *	Enable MII auto-polling
1382 		 */
1383 		enable_mii_autopoll(regs);
1384 
1385 		setup_adaptive_interrupts(vptr);
1386 
1387 		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1388 		writew(vptr->options.numrx - 1, &regs->RDCSize);
1389 		mac_rx_queue_run(regs);
1390 		mac_rx_queue_wake(regs);
1391 
1392 		writew(vptr->options.numtx - 1, &regs->TDCSize);
1393 
1394 		for (i = 0; i < vptr->tx.numq; i++) {
1395 			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1396 			mac_tx_queue_run(regs, i);
1397 		}
1398 
1399 		init_flow_control_register(vptr);
1400 
1401 		writel(CR0_STOP, &regs->CR0Clr);
1402 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1403 
1404 		mii_status = velocity_get_opt_media_mode(vptr);
1405 		netif_stop_queue(vptr->dev);
1406 
1407 		mii_init(vptr, mii_status);
1408 
1409 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1410 			velocity_print_link_status(vptr);
1411 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1412 				netif_wake_queue(vptr->dev);
1413 		}
1414 
1415 		enable_flow_control_ability(vptr);
1416 		mac_hw_mibs_init(regs);
1417 		mac_write_int_mask(vptr->int_mask, regs);
1418 		mac_clear_isr(regs);
1419 
1420 	}
1421 }
1422 
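/**
 *	velocity_give_many_rx_descs	-	return descriptors to the NIC
 *	@vptr: velocity adapter
 *
 *	Hand the refilled receive descriptors back to the hardware,
 *	always in multiples of four as the hardware requires.
 */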
1423 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1424 {
1425 	struct mac_regs __iomem *regs = vptr->mac_regs;
1426 	int avail, dirty, unusable;
1427 
1428 	/*
	 * The number of RDs handed back must be a multiple of 4 per the hardware spec
1430 	 * (programming guide rev 1.20, p.13)
1431 	 */
1432 	if (vptr->rx.filled < 4)
1433 		return;
1434 
1435 	wmb();
1436 
1437 	unusable = vptr->rx.filled & 0x0003;
1438 	dirty = vptr->rx.dirty - unusable;
1439 	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1440 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1441 		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1442 	}
1443 
1444 	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1445 	vptr->rx.filled = unusable;
1446 }
1447 
1448 /**
1449  *	velocity_init_dma_rings	-	set up DMA rings
1450  *	@vptr: Velocity to set up
1451  *
1452  *	Allocate PCI mapped DMA rings for the receive and transmit layer
1453  *	to use.
1454  */
1455 static int velocity_init_dma_rings(struct velocity_info *vptr)
1456 {
1457 	struct velocity_opt *opt = &vptr->options;
1458 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1459 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1460 	struct pci_dev *pdev = vptr->pdev;
1461 	dma_addr_t pool_dma;
1462 	void *pool;
1463 	unsigned int i;
1464 
1465 	/*
	 * Allocate all RD/TD rings as a single pool.
	 *
	 * pci_alloc_consistent() fulfills the requirement for 64 byte
	 * alignment
1470 	 */
1471 	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1472 				    rx_ring_size, &pool_dma);
1473 	if (!pool) {
1474 		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1475 			vptr->dev->name);
1476 		return -ENOMEM;
1477 	}
1478 
1479 	vptr->rx.ring = pool;
1480 	vptr->rx.pool_dma = pool_dma;
1481 
1482 	pool += rx_ring_size;
1483 	pool_dma += rx_ring_size;
1484 
1485 	for (i = 0; i < vptr->tx.numq; i++) {
1486 		vptr->tx.rings[i] = pool;
1487 		vptr->tx.pool_dma[i] = pool_dma;
1488 		pool += tx_ring_size;
1489 		pool_dma += tx_ring_size;
1490 	}
1491 
1492 	return 0;
1493 }
1494 
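/**
 *	velocity_set_rxbufsize	-	set the receive buffer size
 *	@vptr: velocity adapter
 *	@mtu: interface MTU
 *
 *	Use the standard buffer size for a normal MTU, otherwise size
 *	the receive buffers to suit the larger frames.
 */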
1495 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1496 {
1497 	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1498 }
1499 
1500 /**
1501  *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1502  *	@vptr: velocity
1503  *	@idx: ring index
1504  *
1505  *	Allocate a new full sized buffer for the reception of a frame and
1506  *	map it into PCI space for the hardware to use. The hardware
1507  *	requires *64* byte alignment of the buffer which makes life
1508  *	less fun than would be ideal.
1509  */
1510 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1511 {
1512 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1513 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1514 
1515 	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);
1516 	if (rd_info->skb == NULL)
1517 		return -ENOMEM;
1518 
1519 	/*
1520 	 *	Do the gymnastics to get the buffer head for data at
1521 	 *	64byte alignment.
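	 *
	 *	e.g. if (skb->data & 63) == 8 then 56 bytes are reserved so
	 *	that the data area starts on the next 64 byte boundary.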
1522 	 */
1523 	skb_reserve(rd_info->skb,
1524 			64 - ((unsigned long) rd_info->skb->data & 63));
1525 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1526 					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1527 
1528 	/*
1529 	 *	Fill in the descriptor to match
1530 	 */
1531 
1532 	*((u32 *) & (rd->rdesc0)) = 0;
1533 	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1534 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1535 	rd->pa_high = 0;
1536 	return 0;
1537 }
1538 
1539 
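/**
 *	velocity_rx_refill	-	refill the receive ring
 *	@vptr: velocity adapter
 *
 *	Walk the dirty portion of the receive ring allocating new
 *	buffers where needed. Returns the number of descriptors made ready.
 */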
1540 static int velocity_rx_refill(struct velocity_info *vptr)
1541 {
1542 	int dirty = vptr->rx.dirty, done = 0;
1543 
1544 	do {
1545 		struct rx_desc *rd = vptr->rx.ring + dirty;
1546 
1547 		/* Fine for an all zero Rx desc at init time as well */
1548 		if (rd->rdesc0.len & OWNED_BY_NIC)
1549 			break;
1550 
1551 		if (!vptr->rx.info[dirty].skb) {
1552 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1553 				break;
1554 		}
1555 		done++;
1556 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1557 	} while (dirty != vptr->rx.curr);
1558 
1559 	if (done) {
1560 		vptr->rx.dirty = dirty;
1561 		vptr->rx.filled += done;
1562 	}
1563 
1564 	return done;
1565 }
1566 
1567 /**
1568  *	velocity_free_rd_ring	-	free receive ring
1569  *	@vptr: velocity to clean up
1570  *
1571  *	Free the receive buffers for each ring slot and any
1572  *	attached socket buffers that need to go away.
1573  */
1574 static void velocity_free_rd_ring(struct velocity_info *vptr)
1575 {
1576 	int i;
1577 
1578 	if (vptr->rx.info == NULL)
1579 		return;
1580 
1581 	for (i = 0; i < vptr->options.numrx; i++) {
1582 		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1583 		struct rx_desc *rd = vptr->rx.ring + i;
1584 
1585 		memset(rd, 0, sizeof(*rd));
1586 
1587 		if (!rd_info->skb)
1588 			continue;
1589 		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1590 				 PCI_DMA_FROMDEVICE);
1591 		rd_info->skb_dma = 0;
1592 
1593 		dev_kfree_skb(rd_info->skb);
1594 		rd_info->skb = NULL;
1595 	}
1596 
1597 	kfree(vptr->rx.info);
1598 	vptr->rx.info = NULL;
1599 }
1600 
1601 /**
1602  *	velocity_init_rd_ring	-	set up receive ring
1603  *	@vptr: velocity to configure
1604  *
1605  *	Allocate and set up the receive buffers for each ring slot and
1606  *	assign them to the network adapter.
1607  */
1608 static int velocity_init_rd_ring(struct velocity_info *vptr)
1609 {
1610 	int ret = -ENOMEM;
1611 
1612 	vptr->rx.info = kcalloc(vptr->options.numrx,
1613 				sizeof(struct velocity_rd_info), GFP_KERNEL);
1614 	if (!vptr->rx.info)
1615 		goto out;
1616 
1617 	velocity_init_rx_ring_indexes(vptr);
1618 
1619 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1620 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1621 			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
1622 		velocity_free_rd_ring(vptr);
1623 		goto out;
1624 	}
1625 
1626 	ret = 0;
1627 out:
1628 	return ret;
1629 }
1630 
1631 /**
1632  *	velocity_init_td_ring	-	set up transmit ring
1633  *	@vptr:	velocity
1634  *
1635  *	Set up the transmit ring and chain the ring pointers together.
1636  *	Returns zero on success or a negative posix errno code for
1637  *	failure.
1638  */
1639 static int velocity_init_td_ring(struct velocity_info *vptr)
1640 {
1641 	int j;
1642 
1643 	/* Init the TD ring entries */
1644 	for (j = 0; j < vptr->tx.numq; j++) {
1645 
1646 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1647 					    sizeof(struct velocity_td_info),
1648 					    GFP_KERNEL);
1649 		if (!vptr->tx.infos[j])	{
1650 			while (--j >= 0)
1651 				kfree(vptr->tx.infos[j]);
1652 			return -ENOMEM;
1653 		}
1654 
1655 		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1656 	}
1657 	return 0;
1658 }
1659 
1660 /**
1661  *	velocity_free_dma_rings	-	free PCI ring pointers
1662  *	@vptr: Velocity to free from
1663  *
1664  *	Clean up the PCI ring buffers allocated to this velocity.
1665  */
1666 static void velocity_free_dma_rings(struct velocity_info *vptr)
1667 {
1668 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1669 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1670 
1671 	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1672 }
1673 
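/**
 *	velocity_init_rings	-	set up all rings
 *	@vptr: velocity adapter
 *	@mtu: MTU to size the receive buffers for
 *
 *	Allocate the DMA rings and then populate the receive and
 *	transmit rings, unwinding the allocations on failure.
 */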
1674 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1675 {
1676 	int ret;
1677 
1678 	velocity_set_rxbufsize(vptr, mtu);
1679 
1680 	ret = velocity_init_dma_rings(vptr);
1681 	if (ret < 0)
1682 		goto out;
1683 
1684 	ret = velocity_init_rd_ring(vptr);
1685 	if (ret < 0)
1686 		goto err_free_dma_rings_0;
1687 
1688 	ret = velocity_init_td_ring(vptr);
1689 	if (ret < 0)
1690 		goto err_free_rd_ring_1;
1691 out:
1692 	return ret;
1693 
1694 err_free_rd_ring_1:
1695 	velocity_free_rd_ring(vptr);
1696 err_free_dma_rings_0:
1697 	velocity_free_dma_rings(vptr);
1698 	goto out;
1699 }
1700 
1701 /**
1702  *	velocity_free_tx_buf	-	free transmit buffer
1703  *	@vptr: velocity
 *	@tdinfo: buffer information for the ring slot
 *	@td: transmit descriptor being completed
 *
 *	Release a transmit buffer. If the buffer was preallocated then
1707  *	recycle it, if not then unmap the buffer.
1708  */
1709 static void velocity_free_tx_buf(struct velocity_info *vptr,
1710 		struct velocity_td_info *tdinfo, struct tx_desc *td)
1711 {
1712 	struct sk_buff *skb = tdinfo->skb;
1713 
1714 	/*
1715 	 *	Don't unmap the pre-allocated tx_bufs
1716 	 */
1717 	if (tdinfo->skb_dma) {
1718 		int i;
1719 
1720 		for (i = 0; i < tdinfo->nskb_dma; i++) {
1721 			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1722 
1723 			/* For scatter-gather */
1724 			if (skb_shinfo(skb)->nr_frags > 0)
1725 				pktlen = max_t(size_t, pktlen,
1726 						td->td_buf[i].size & ~TD_QUEUE);
1727 
1728 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1729 					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
1730 		}
1731 	}
1732 	dev_kfree_skb_irq(skb);
1733 	tdinfo->skb = NULL;
1734 }
1735 
1736 /*
1737  *	FIXME: could we merge this with velocity_free_tx_buf ?
1738  */
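/**
 *	velocity_free_td_ring_entry	-	free a transmit ring slot
 *	@vptr: velocity adapter
 *	@q: transmit queue index
 *	@n: ring entry index
 *
 *	Unmap and free any socket buffer still attached to the given
 *	transmit ring entry.
 */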
1739 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1740 							 int q, int n)
1741 {
1742 	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1743 	int i;
1744 
1745 	if (td_info == NULL)
1746 		return;
1747 
1748 	if (td_info->skb) {
1749 		for (i = 0; i < td_info->nskb_dma; i++) {
1750 			if (td_info->skb_dma[i]) {
1751 				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1752 					td_info->skb->len, PCI_DMA_TODEVICE);
1753 				td_info->skb_dma[i] = 0;
1754 			}
1755 		}
1756 		dev_kfree_skb(td_info->skb);
1757 		td_info->skb = NULL;
1758 	}
1759 }
1760 
1761 /**
1762  *	velocity_free_td_ring	-	free td ring
1763  *	@vptr: velocity
1764  *
1765  *	Free up the transmit ring for this particular velocity adapter.
1766  *	We free the ring contents but not the ring itself.
1767  */
1768 static void velocity_free_td_ring(struct velocity_info *vptr)
1769 {
1770 	int i, j;
1771 
1772 	for (j = 0; j < vptr->tx.numq; j++) {
1773 		if (vptr->tx.infos[j] == NULL)
1774 			continue;
1775 		for (i = 0; i < vptr->options.numtx; i++)
1776 			velocity_free_td_ring_entry(vptr, j, i);
1777 
1778 		kfree(vptr->tx.infos[j]);
1779 		vptr->tx.infos[j] = NULL;
1780 	}
1781 }
1782 
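/**
 *	velocity_free_rings	-	free all ring resources
 *	@vptr: velocity adapter
 *
 *	Release the receive and transmit ring contents and then free
 *	the DMA pool that backs them.
 */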
1783 static void velocity_free_rings(struct velocity_info *vptr)
1784 {
1785 	velocity_free_td_ring(vptr);
1786 	velocity_free_rd_ring(vptr);
1787 	velocity_free_dma_rings(vptr);
1788 }
1789 
1790 /**
1791  *	velocity_error	-	handle error from controller
1792  *	@vptr: velocity
1793  *	@status: card status
1794  *
1795  *	Process an error report from the hardware and attempt to recover
1796  *	the card itself. At the moment we cannot recover from some
1797  *	theoretically impossible errors but this could be fixed using
1798  *	the pci_device_failed logic to bounce the hardware
1799  *
1800  */
1801 static void velocity_error(struct velocity_info *vptr, int status)
1802 {
1803 
1804 	if (status & ISR_TXSTLI) {
1805 		struct mac_regs __iomem *regs = vptr->mac_regs;
1806 
1807 		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1808 		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1809 		writew(TRDCSR_RUN, &regs->TDCSRClr);
1810 		netif_stop_queue(vptr->dev);
1811 
1812 		/* FIXME: port over the pci_device_failed code and use it
1813 		   here */
1814 	}
1815 
1816 	if (status & ISR_SRCI) {
1817 		struct mac_regs __iomem *regs = vptr->mac_regs;
1818 		int linked;
1819 
1820 		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1821 			vptr->mii_status = check_connection_type(regs);
1822 
1823 			/*
			 *	If it is a 3119, disable frame bursting in
			 *	half-duplex mode and enable it in full-duplex
			 *	mode.
			 */
1828 			if (vptr->rev_id < REV_ID_VT3216_A0) {
1829 				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1830 					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1831 				else
1832 					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1833 			}
1834 			/*
			 *	Only enable the CD heartbeat counter in 10HD mode
1836 			 */
1837 			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1838 				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1839 			else
1840 				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1841 
1842 			setup_queue_timers(vptr);
1843 		}
1844 		/*
1845 		 *	Get link status from PHYSR0
1846 		 */
1847 		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1848 
1849 		if (linked) {
1850 			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1851 			netif_carrier_on(vptr->dev);
1852 		} else {
1853 			vptr->mii_status |= VELOCITY_LINK_FAIL;
1854 			netif_carrier_off(vptr->dev);
1855 		}
1856 
1857 		velocity_print_link_status(vptr);
1858 		enable_flow_control_ability(vptr);
1859 
1860 		/*
1861 		 *	Re-enable auto-polling because SRCI will disable
1862 		 *	auto-polling
1863 		 */
1864 
1865 		enable_mii_autopoll(regs);
1866 
1867 		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1868 			netif_stop_queue(vptr->dev);
1869 		else
1870 			netif_wake_queue(vptr->dev);
1871 
1872 	}
1873 	if (status & ISR_MIBFI)
1874 		velocity_update_hw_mibs(vptr);
1875 	if (status & ISR_LSTEI)
1876 		mac_rx_queue_wake(vptr->mac_regs);
1877 }
1878 
1879 /**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: velocity
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
1886  */
1887 static int velocity_tx_srv(struct velocity_info *vptr)
1888 {
1889 	struct tx_desc *td;
1890 	int qnum;
1891 	int full = 0;
1892 	int idx;
1893 	int works = 0;
1894 	struct velocity_td_info *tdinfo;
1895 	struct net_device_stats *stats = &vptr->dev->stats;
1896 
1897 	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1898 		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1899 			idx = (idx + 1) % vptr->options.numtx) {
1900 
1901 			/*
1902 			 *	Get Tx Descriptor
1903 			 */
1904 			td = &(vptr->tx.rings[qnum][idx]);
1905 			tdinfo = &(vptr->tx.infos[qnum][idx]);
1906 
1907 			if (td->tdesc0.len & OWNED_BY_NIC)
1908 				break;
1909 
1910 			if ((works++ > 15))
1911 				break;
1912 
1913 			if (td->tdesc0.TSR & TSR0_TERR) {
1914 				stats->tx_errors++;
1915 				stats->tx_dropped++;
1916 				if (td->tdesc0.TSR & TSR0_CDH)
1917 					stats->tx_heartbeat_errors++;
1918 				if (td->tdesc0.TSR & TSR0_CRS)
1919 					stats->tx_carrier_errors++;
1920 				if (td->tdesc0.TSR & TSR0_ABT)
1921 					stats->tx_aborted_errors++;
1922 				if (td->tdesc0.TSR & TSR0_OWC)
1923 					stats->tx_window_errors++;
1924 			} else {
1925 				stats->tx_packets++;
1926 				stats->tx_bytes += tdinfo->skb->len;
1927 			}
1928 			velocity_free_tx_buf(vptr, tdinfo, td);
1929 			vptr->tx.used[qnum]--;
1930 		}
1931 		vptr->tx.tail[qnum] = idx;
1932 
1933 		if (AVAIL_TD(vptr, qnum) < 1)
1934 			full = 1;
1935 	}
1936 	/*
1937 	 *	Look to see if we should kick the transmit network
1938 	 *	layer for more work.
1939 	 */
1940 	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1941 	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1942 		netif_wake_queue(vptr->dev);
1943 	}
1944 	return works;
1945 }
1946 
1947 /**
1948  *	velocity_rx_csum	-	checksum process
1949  *	@rd: receive packet descriptor
1950  *	@skb: network layer packet buffer
1951  *
1952  *	Process the status bits for the received packet and determine
1953  *	if the checksum was computed and verified by the hardware
1954  */
1955 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1956 {
1957 	skb_checksum_none_assert(skb);
1958 
1959 	if (rd->rdesc1.CSM & CSM_IPKT) {
1960 		if (rd->rdesc1.CSM & CSM_IPOK) {
1961 			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1962 					(rd->rdesc1.CSM & CSM_UDPKT)) {
1963 				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1964 					return;
1965 			}
1966 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1967 		}
1968 	}
1969 }
1970 
1971 /**
1972  *	velocity_rx_copy	-	in place Rx copy for small packets
1973  *	@rx_skb: network layer packet buffer candidate
1974  *	@pkt_size: received data size
 *	@vptr: velocity adapter
1977  *
1978  *	Replace the current skb that is scheduled for Rx processing by a
1979  *	shorter, immediately allocated skb, if the received packet is small
1980  *	enough. This function returns a negative value if the received
1981  *	packet is too big or if memory is exhausted.
1982  */
1983 static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1984 			    struct velocity_info *vptr)
1985 {
1986 	int ret = -1;
1987 	if (pkt_size < rx_copybreak) {
1988 		struct sk_buff *new_skb;
1989 
1990 		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1991 		if (new_skb) {
1992 			new_skb->ip_summed = rx_skb[0]->ip_summed;
1993 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1994 			*rx_skb = new_skb;
1995 			ret = 0;
1996 		}
1997 
1998 	}
1999 	return ret;
2000 }
2001 
2002 /**
2003  *	velocity_iph_realign	-	IP header alignment
2004  *	@vptr: velocity we are handling
2005  *	@skb: network layer packet buffer
2006  *	@pkt_size: received data size
2007  *
 *	Align the IP header on a 2-byte boundary. This behavior can be
 *	configured by the user.
2010  */
2011 static inline void velocity_iph_realign(struct velocity_info *vptr,
2012 					struct sk_buff *skb, int pkt_size)
2013 {
2014 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2015 		memmove(skb->data + 2, skb->data, pkt_size);
2016 		skb_reserve(skb, 2);
2017 	}
2018 }
2019 
2020 /**
2021  *	velocity_receive_frame	-	received packet processor
2022  *	@vptr: velocity we are handling
2023  *	@idx: ring index
2024  *
2025  *	A packet has arrived. We process the packet and if appropriate
2026  *	pass the frame up the network stack
2027  */
2028 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2029 {
2030 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2031 	struct net_device_stats *stats = &vptr->dev->stats;
2032 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2033 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2034 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2035 	struct sk_buff *skb;
2036 
2037 	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2039 		stats->rx_length_errors++;
2040 		return -EINVAL;
2041 	}
2042 
2043 	if (rd->rdesc0.RSR & RSR_MAR)
2044 		stats->multicast++;
2045 
2046 	skb = rd_info->skb;
2047 
2048 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2049 				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2050 
2051 	/*
	 *	Drop frames not meeting the IEEE 802.3 length requirements
2053 	 */
2054 
2055 	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2056 		if (rd->rdesc0.RSR & RSR_RL) {
2057 			stats->rx_length_errors++;
2058 			return -EINVAL;
2059 		}
2060 	}
2061 
2062 	pci_action = pci_dma_sync_single_for_device;
2063 
2064 	velocity_rx_csum(rd, skb);
2065 
2066 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2067 		velocity_iph_realign(vptr, skb, pkt_len);
2068 		pci_action = pci_unmap_single;
2069 		rd_info->skb = NULL;
2070 	}
2071 
2072 	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2073 		   PCI_DMA_FROMDEVICE);
2074 
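	/* The reported length includes the trailing 4-byte frame checksum */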
2075 	skb_put(skb, pkt_len - 4);
2076 	skb->protocol = eth_type_trans(skb, vptr->dev);
2077 
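	/*
	 *	The 802.1Q tag is stored big-endian in the descriptor, hence
	 *	the byte swap after the little-endian conversion below.
	 */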
2078 	if (rd->rdesc0.RSR & RSR_DETAG) {
2079 		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2080 
2081 		__vlan_hwaccel_put_tag(skb, vid);
2082 	}
2083 	netif_rx(skb);
2084 
2085 	stats->rx_bytes += pkt_len;
2086 	stats->rx_packets++;
2087 
2088 	return 0;
2089 }
2090 
2091 /**
2092  *	velocity_rx_srv		-	service RX interrupt
2093  *	@vptr: velocity
2094  *
2095  *	Walk the receive ring of the velocity adapter and remove
2096  *	any received packets from the receive queue. Hand the ring
2097  *	slots back to the adapter for reuse.
2098  */
2099 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2100 {
2101 	struct net_device_stats *stats = &vptr->dev->stats;
2102 	int rd_curr = vptr->rx.curr;
2103 	int works = 0;
2104 
2105 	while (works < budget_left) {
2106 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2107 
2108 		if (!vptr->rx.info[rd_curr].skb)
2109 			break;
2110 
2111 		if (rd->rdesc0.len & OWNED_BY_NIC)
2112 			break;
2113 
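		/* Read the rest of the descriptor only after the owner check */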
2114 		rmb();
2115 
2116 		/*
		 *	Don't drop CE or RL error frames even though RXOK is off
2118 		 */
2119 		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2120 			if (velocity_receive_frame(vptr, rd_curr) < 0)
2121 				stats->rx_dropped++;
2122 		} else {
2123 			if (rd->rdesc0.RSR & RSR_CRC)
2124 				stats->rx_crc_errors++;
2125 			if (rd->rdesc0.RSR & RSR_FAE)
2126 				stats->rx_frame_errors++;
2127 
2128 			stats->rx_dropped++;
2129 		}
2130 
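		/* Re-arm the per-descriptor interrupt enable before recycling */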
2131 		rd->size |= RX_INTEN;
2132 
2133 		rd_curr++;
2134 		if (rd_curr >= vptr->options.numrx)
2135 			rd_curr = 0;
2136 		works++;
2137 	}
2138 
2139 	vptr->rx.curr = rd_curr;
2140 
2141 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2142 		velocity_give_many_rx_descs(vptr);
2143 
2144 	VAR_USED(stats);
2145 	return works;
2146 }
2147 
2148 static int velocity_poll(struct napi_struct *napi, int budget)
2149 {
2150 	struct velocity_info *vptr = container_of(napi,
2151 			struct velocity_info, napi);
2152 	unsigned int rx_done;
2153 	unsigned long flags;
2154 
2155 	spin_lock_irqsave(&vptr->lock, flags);
2156 	/*
2157 	 * Do rx and tx twice for performance (taken from the VIA
2158 	 * out-of-tree driver).
2159 	 */
2160 	rx_done = velocity_rx_srv(vptr, budget / 2);
2161 	velocity_tx_srv(vptr);
2162 	rx_done += velocity_rx_srv(vptr, budget - rx_done);
2163 	velocity_tx_srv(vptr);
2164 
2165 	/* If budget not fully consumed, exit the polling mode */
2166 	if (rx_done < budget) {
2167 		napi_complete(napi);
2168 		mac_enable_int(vptr->mac_regs);
2169 	}
2170 	spin_unlock_irqrestore(&vptr->lock, flags);
2171 
2172 	return rx_done;
2173 }
2174 
2175 /**
2176  *	velocity_intr		-	interrupt callback
2177  *	@irq: interrupt number
2178  *	@dev_instance: interrupting device
2179  *
2180  *	Called whenever an interrupt is generated by the velocity
 *	adapter IRQ line. Since the line may be shared, we first check
 *	whether the interrupt is really ours and, if not, exit as
 *	efficiently as possible.
2184  */
2185 static irqreturn_t velocity_intr(int irq, void *dev_instance)
2186 {
2187 	struct net_device *dev = dev_instance;
2188 	struct velocity_info *vptr = netdev_priv(dev);
2189 	u32 isr_status;
2190 
2191 	spin_lock(&vptr->lock);
2192 	isr_status = mac_read_isr(vptr->mac_regs);
2193 
2194 	/* Not us ? */
2195 	if (isr_status == 0) {
2196 		spin_unlock(&vptr->lock);
2197 		return IRQ_NONE;
2198 	}
2199 
2200 	/* Ack the interrupt */
2201 	mac_write_isr(vptr->mac_regs, isr_status);
2202 
2203 	if (likely(napi_schedule_prep(&vptr->napi))) {
2204 		mac_disable_int(vptr->mac_regs);
2205 		__napi_schedule(&vptr->napi);
2206 	}
2207 
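	/* Anything beyond the rx/tx completion bits indicates an error */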
2208 	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2209 		velocity_error(vptr, isr_status);
2210 
2211 	spin_unlock(&vptr->lock);
2212 
2213 	return IRQ_HANDLED;
2214 }
2215 
2216 /**
2217  *	velocity_open		-	interface activation callback
2218  *	@dev: network layer device to open
2219  *
2220  *	Called when the network layer brings the interface up. Returns
2221  *	a negative posix error code on failure, or zero on success.
2222  *
 *	All the ring allocation and setup is done on open for this
 *	adapter to minimise memory usage when inactive.
2225  */
2226 static int velocity_open(struct net_device *dev)
2227 {
2228 	struct velocity_info *vptr = netdev_priv(dev);
2229 	int ret;
2230 
2231 	ret = velocity_init_rings(vptr, dev->mtu);
2232 	if (ret < 0)
2233 		goto out;
2234 
2235 	/* Ensure chip is running */
2236 	pci_set_power_state(vptr->pdev, PCI_D0);
2237 
2238 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2239 
2240 	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2241 			  dev->name, dev);
2242 	if (ret < 0) {
2243 		/* Power down the chip */
2244 		pci_set_power_state(vptr->pdev, PCI_D3hot);
2245 		velocity_free_rings(vptr);
2246 		goto out;
2247 	}
2248 
2249 	velocity_give_many_rx_descs(vptr);
2250 
2251 	mac_enable_int(vptr->mac_regs);
2252 	netif_start_queue(dev);
2253 	napi_enable(&vptr->napi);
2254 	vptr->flags |= VELOCITY_FLAGS_OPENED;
2255 out:
2256 	return ret;
2257 }
2258 
2259 /**
2260  *	velocity_shutdown	-	shut down the chip
2261  *	@vptr: velocity to deactivate
2262  *
2263  *	Shuts down the internal operations of the velocity and
2264  *	disables interrupts, autopolling, transmit and receive
2265  */
2266 static void velocity_shutdown(struct velocity_info *vptr)
2267 {
2268 	struct mac_regs __iomem *regs = vptr->mac_regs;
2269 	mac_disable_int(regs);
2270 	writel(CR0_STOP, &regs->CR0Set);
2271 	writew(0xFFFF, &regs->TDCSRClr);
2272 	writeb(0xFF, &regs->RDCSRClr);
2273 	safe_disable_mii_autopoll(regs);
2274 	mac_clear_isr(regs);
2275 }
2276 
2277 /**
2278  *	velocity_change_mtu	-	MTU change callback
2279  *	@dev: network device
2280  *	@new_mtu: desired MTU
2281  *
 *	Handle a request from the networking layer to change the MTU of
 *	this interface. Returns zero for success or a negative posix
 *	error code.
2285  */
2286 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2287 {
2288 	struct velocity_info *vptr = netdev_priv(dev);
2289 	int ret = 0;
2290 
2291 	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
2292 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2293 				vptr->dev->name);
2294 		ret = -EINVAL;
2295 		goto out_0;
2296 	}
2297 
2298 	if (!netif_running(dev)) {
2299 		dev->mtu = new_mtu;
2300 		goto out_0;
2301 	}
2302 
2303 	if (dev->mtu != new_mtu) {
2304 		struct velocity_info *tmp_vptr;
2305 		unsigned long flags;
2306 		struct rx_info rx;
2307 		struct tx_info tx;
2308 
2309 		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2310 		if (!tmp_vptr) {
2311 			ret = -ENOMEM;
2312 			goto out_0;
2313 		}
2314 
2315 		tmp_vptr->dev = dev;
2316 		tmp_vptr->pdev = vptr->pdev;
2317 		tmp_vptr->options = vptr->options;
2318 		tmp_vptr->tx.numq = vptr->tx.numq;
2319 
2320 		ret = velocity_init_rings(tmp_vptr, new_mtu);
2321 		if (ret < 0)
2322 			goto out_free_tmp_vptr_1;
2323 
2324 		spin_lock_irqsave(&vptr->lock, flags);
2325 
2326 		netif_stop_queue(dev);
2327 		velocity_shutdown(vptr);
2328 
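		/*
		 *	Swap the freshly allocated rings into the live device
		 *	and park the old rings in tmp_vptr so they can be
		 *	freed once the device is running again.
		 */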
2329 		rx = vptr->rx;
2330 		tx = vptr->tx;
2331 
2332 		vptr->rx = tmp_vptr->rx;
2333 		vptr->tx = tmp_vptr->tx;
2334 
2335 		tmp_vptr->rx = rx;
2336 		tmp_vptr->tx = tx;
2337 
2338 		dev->mtu = new_mtu;
2339 
2340 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2341 
2342 		velocity_give_many_rx_descs(vptr);
2343 
2344 		mac_enable_int(vptr->mac_regs);
2345 		netif_start_queue(dev);
2346 
2347 		spin_unlock_irqrestore(&vptr->lock, flags);
2348 
2349 		velocity_free_rings(tmp_vptr);
2350 
2351 out_free_tmp_vptr_1:
2352 		kfree(tmp_vptr);
2353 	}
2354 out_0:
2355 	return ret;
2356 }
2357 
2358 /**
2359  *	velocity_mii_ioctl		-	MII ioctl handler
2360  *	@dev: network device
2361  *	@ifr: the ifreq block for the ioctl
2362  *	@cmd: the command
2363  *
2364  *	Process MII requests made via ioctl from the network layer. These
2365  *	are used by tools like kudzu to interrogate the link state of the
 *	hardware.
2367  */
2368 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2369 {
2370 	struct velocity_info *vptr = netdev_priv(dev);
2371 	struct mac_regs __iomem *regs = vptr->mac_regs;
2372 	unsigned long flags;
2373 	struct mii_ioctl_data *miidata = if_mii(ifr);
2374 	int err;
2375 
2376 	switch (cmd) {
2377 	case SIOCGMIIPHY:
2378 		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2379 		break;
2380 	case SIOCGMIIREG:
2381 		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2382 			return -ETIMEDOUT;
2383 		break;
2384 	case SIOCSMIIREG:
2385 		spin_lock_irqsave(&vptr->lock, flags);
2386 		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2387 		spin_unlock_irqrestore(&vptr->lock, flags);
2388 		check_connection_type(vptr->mac_regs);
2389 		if (err)
2390 			return err;
2391 		break;
2392 	default:
2393 		return -EOPNOTSUPP;
2394 	}
2395 	return 0;
2396 }
2397 
2398 /**
2399  *	velocity_ioctl		-	ioctl entry point
2400  *	@dev: network device
2401  *	@rq: interface request ioctl
2402  *	@cmd: command code
2403  *
2404  *	Called when the user issues an ioctl request to the network
2405  *	device in question. The velocity interface supports MII.
2406  */
2407 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2408 {
2409 	struct velocity_info *vptr = netdev_priv(dev);
2410 	int ret;
2411 
	/* If we are asked for information while the device is power
	   saving, we need to bring it back up to talk to it */
2414 
2415 	if (!netif_running(dev))
2416 		pci_set_power_state(vptr->pdev, PCI_D0);
2417 
2418 	switch (cmd) {
2419 	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2420 	case SIOCGMIIREG:	/* Read MII PHY register. */
2421 	case SIOCSMIIREG:	/* Write to MII PHY register. */
2422 		ret = velocity_mii_ioctl(dev, rq, cmd);
2423 		break;
2424 
2425 	default:
2426 		ret = -EOPNOTSUPP;
2427 	}
2428 	if (!netif_running(dev))
2429 		pci_set_power_state(vptr->pdev, PCI_D3hot);
2430 
2431 
2432 	return ret;
2433 }
2434 
2435 /**
2436  *	velocity_get_status	-	statistics callback
2437  *	@dev: network device
2438  *
2439  *	Callback from the network layer to allow driver statistics
2440  *	to be resynchronized with hardware collected state. In the
 *	case of the velocity we need to pull the MIB counters from
 *	the hardware into the software counters before letting the
 *	network layer display them.
2444  */
2445 static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2446 {
2447 	struct velocity_info *vptr = netdev_priv(dev);
2448 
	/* If the hardware is down, don't touch the MIB counters */
2450 	if (!netif_running(dev))
2451 		return &dev->stats;
2452 
2453 	spin_lock_irq(&vptr->lock);
2454 	velocity_update_hw_mibs(vptr);
2455 	spin_unlock_irq(&vptr->lock);
2456 
2457 	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2458 	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2459 	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2460 
	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];

	/*
	 *	The remaining detailed rx/tx error counters (rx_dropped,
	 *	rx_over_errors, rx_frame_errors, rx_fifo_errors,
	 *	rx_missed_errors, tx_fifo_errors, ...) are not filled in
	 *	from the hardware MIBs.
	 */
2473 
2474 	return &dev->stats;
2475 }
2476 
2477 /**
2478  *	velocity_close		-	close adapter callback
2479  *	@dev: network device
2480  *
2481  *	Callback from the network layer when the velocity is being
2482  *	deactivated by the network layer
2483  */
2484 static int velocity_close(struct net_device *dev)
2485 {
2486 	struct velocity_info *vptr = netdev_priv(dev);
2487 
2488 	napi_disable(&vptr->napi);
2489 	netif_stop_queue(dev);
2490 	velocity_shutdown(vptr);
2491 
2492 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2493 		velocity_get_ip(vptr);
2494 
2495 	free_irq(vptr->pdev->irq, dev);
2496 
2497 	velocity_free_rings(vptr);
2498 
2499 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2500 	return 0;
2501 }
2502 
2503 /**
2504  *	velocity_xmit		-	transmit packet callback
2505  *	@skb: buffer to transmit
2506  *	@dev: network device
2507  *
 *	Called by the network layer to request that a packet be queued to
 *	the velocity adapter. Returns NETDEV_TX_OK on success.
2510  */
2511 static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2512 				 struct net_device *dev)
2513 {
2514 	struct velocity_info *vptr = netdev_priv(dev);
2515 	int qnum = 0;
2516 	struct tx_desc *td_ptr;
2517 	struct velocity_td_info *tdinfo;
2518 	unsigned long flags;
2519 	int pktlen;
2520 	int index, prev;
2521 	int i = 0;
2522 
2523 	if (skb_padto(skb, ETH_ZLEN))
2524 		goto out;
2525 
2526 	/* The hardware can handle at most 7 memory segments, so merge
2527 	 * the skb if there are more */
2528 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2529 		kfree_skb(skb);
2530 		return NETDEV_TX_OK;
2531 	}
2532 
2533 	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2534 			max_t(unsigned int, skb->len, ETH_ZLEN) :
2535 				skb_headlen(skb);
2536 
2537 	spin_lock_irqsave(&vptr->lock, flags);
2538 
2539 	index = vptr->tx.curr[qnum];
2540 	td_ptr = &(vptr->tx.rings[qnum][index]);
2541 	tdinfo = &(vptr->tx.infos[qnum][index]);
2542 
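	/* Request a transmit completion interrupt for this descriptor */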
2543 	td_ptr->tdesc1.TCR = TCR0_TIC;
2544 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2545 
2546 	/*
2547 	 *	Map the linear network buffer into PCI space and
2548 	 *	add it to the transmit ring.
2549 	 */
2550 	tdinfo->skb = skb;
2551 	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2552 	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2553 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2554 	td_ptr->td_buf[0].pa_high = 0;
2555 	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2556 
2557 	/* Handle fragments */
2558 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2559 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2560 
2561 		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
2562 							  frag, 0,
2563 							  skb_frag_size(frag),
2564 							  DMA_TO_DEVICE);
2565 
2566 		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2567 		td_ptr->td_buf[i + 1].pa_high = 0;
2568 		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2569 	}
2570 	tdinfo->nskb_dma = i + 1;
2571 
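	/*
	 *	Build the command field: TCPLS_NORMAL plus the buffer count
	 *	(nskb_dma + 1) shifted into the upper bits (* 16 == << 4).
	 */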
2572 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2573 
2574 	if (vlan_tx_tag_present(skb)) {
2575 		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2576 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2577 	}
2578 
2579 	/*
2580 	 *	Handle hardware checksum
2581 	 */
2582 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2583 		const struct iphdr *ip = ip_hdr(skb);
2584 		if (ip->protocol == IPPROTO_TCP)
2585 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2586 		else if (ip->protocol == IPPROTO_UDP)
2587 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2588 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2589 	}
2590 
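	/*
	 *	Find the previous descriptor (wrapping at the ring start); its
	 *	TD_QUEUE bit is set below to chain it to this new descriptor.
	 */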
2591 	prev = index - 1;
2592 	if (prev < 0)
2593 		prev = vptr->options.numtx - 1;
2594 	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2595 	vptr->tx.used[qnum]++;
2596 	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2597 
2598 	if (AVAIL_TD(vptr, qnum) < 1)
2599 		netif_stop_queue(dev);
2600 
2601 	td_ptr = &(vptr->tx.rings[qnum][prev]);
2602 	td_ptr->td_buf[0].size |= TD_QUEUE;
2603 	mac_tx_queue_wake(vptr->mac_regs, qnum);
2604 
2605 	spin_unlock_irqrestore(&vptr->lock, flags);
2606 out:
2607 	return NETDEV_TX_OK;
2608 }
2609 
2610 static const struct net_device_ops velocity_netdev_ops = {
2611 	.ndo_open		= velocity_open,
2612 	.ndo_stop		= velocity_close,
2613 	.ndo_start_xmit		= velocity_xmit,
2614 	.ndo_get_stats		= velocity_get_stats,
2615 	.ndo_validate_addr	= eth_validate_addr,
2616 	.ndo_set_mac_address	= eth_mac_addr,
2617 	.ndo_set_rx_mode	= velocity_set_multi,
2618 	.ndo_change_mtu		= velocity_change_mtu,
2619 	.ndo_do_ioctl		= velocity_ioctl,
2620 	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2621 	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2622 };
2623 
2624 /**
2625  *	velocity_init_info	-	init private data
2626  *	@pdev: PCI device
2627  *	@vptr: Velocity info
2628  *	@info: Board type
2629  *
2630  *	Set up the initial velocity_info struct for the device that has been
2631  *	discovered.
2632  */
2633 static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
2634 			       const struct velocity_info_tbl *info)
2635 {
2636 	memset(vptr, 0, sizeof(struct velocity_info));
2637 
2638 	vptr->pdev = pdev;
2639 	vptr->chip_id = info->chip_id;
2640 	vptr->tx.numq = info->txqueue;
2641 	vptr->multicast_limit = MCAM_SIZE;
2642 	spin_lock_init(&vptr->lock);
2643 }
2644 
2645 /**
2646  *	velocity_get_pci_info	-	retrieve PCI info for device
2647  *	@vptr: velocity device
2648  *	@pdev: PCI device it matches
2649  *
2650  *	Retrieve the PCI configuration space data that interests us from
2651  *	the kernel PCI layer
2652  */
2653 static int velocity_get_pci_info(struct velocity_info *vptr,
2654 				 struct pci_dev *pdev)
2655 {
2656 	vptr->rev_id = pdev->revision;
2657 
2658 	pci_set_master(pdev);
2659 
2660 	vptr->ioaddr = pci_resource_start(pdev, 0);
2661 	vptr->memaddr = pci_resource_start(pdev, 1);
2662 
2663 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2664 		dev_err(&pdev->dev,
2665 			   "region #0 is not an I/O resource, aborting.\n");
2666 		return -EINVAL;
2667 	}
2668 
2669 	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2670 		dev_err(&pdev->dev,
2671 			   "region #1 is an I/O resource, aborting.\n");
2672 		return -EINVAL;
2673 	}
2674 
2675 	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2676 		dev_err(&pdev->dev, "region #1 is too small.\n");
2677 		return -EINVAL;
2678 	}
2679 	vptr->pdev = pdev;
2680 
2681 	return 0;
2682 }
2683 
2684 /**
2685  *	velocity_print_info	-	per driver data
2686  *	@vptr: velocity
2687  *
 *	Print per-adapter information as the kernel driver discovers
 *	Velocity hardware.
2690  */
2691 static void velocity_print_info(struct velocity_info *vptr)
2692 {
2693 	struct net_device *dev = vptr->dev;
2694 
2695 	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2696 	printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2697 		dev->name, dev->dev_addr);
2698 }
2699 
2700 static u32 velocity_get_link(struct net_device *dev)
2701 {
2702 	struct velocity_info *vptr = netdev_priv(dev);
2703 	struct mac_regs __iomem *regs = vptr->mac_regs;
2704 	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2705 }
2706 
2707 /**
2708  *	velocity_found1		-	set up discovered velocity card
2709  *	@pdev: PCI device
2710  *	@ent: PCI device table entry that matched
2711  *
2712  *	Configure a discovered adapter from scratch. Return a negative
2713  *	errno error code on failure paths.
2714  */
2715 static int velocity_found1(struct pci_dev *pdev,
2716 			   const struct pci_device_id *ent)
2717 {
2718 	static int first = 1;
2719 	struct net_device *dev;
2720 	int i;
2721 	const char *drv_string;
2722 	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2723 	struct velocity_info *vptr;
2724 	struct mac_regs __iomem *regs;
2725 	int ret = -ENOMEM;
2726 
2727 	/* FIXME: this driver, like almost all other ethernet drivers,
2728 	 * can support more than MAX_UNITS.
2729 	 */
2730 	if (velocity_nics >= MAX_UNITS) {
2731 		dev_notice(&pdev->dev, "already found %d NICs.\n",
2732 			   velocity_nics);
2733 		return -ENODEV;
2734 	}
2735 
2736 	dev = alloc_etherdev(sizeof(struct velocity_info));
2737 	if (!dev)
2738 		goto out;
2739 
2740 	/* Chain it all together */
2741 
2742 	SET_NETDEV_DEV(dev, &pdev->dev);
2743 	vptr = netdev_priv(dev);
2744 
2745 
2746 	if (first) {
2747 		printk(KERN_INFO "%s Ver. %s\n",
2748 			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2749 		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2750 		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2751 		first = 0;
2752 	}
2753 
2754 	velocity_init_info(pdev, vptr, info);
2755 
2756 	vptr->dev = dev;
2757 
2758 	ret = pci_enable_device(pdev);
2759 	if (ret < 0)
2760 		goto err_free_dev;
2761 
2762 	ret = velocity_get_pci_info(vptr, pdev);
2763 	if (ret < 0) {
2764 		/* error message already printed */
2765 		goto err_disable;
2766 	}
2767 
2768 	ret = pci_request_regions(pdev, VELOCITY_NAME);
2769 	if (ret < 0) {
2770 		dev_err(&pdev->dev, "No PCI resources.\n");
2771 		goto err_disable;
2772 	}
2773 
2774 	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2775 	if (regs == NULL) {
2776 		ret = -EIO;
2777 		goto err_release_res;
2778 	}
2779 
2780 	vptr->mac_regs = regs;
2781 
2782 	mac_wol_reset(regs);
2783 
2784 	for (i = 0; i < 6; i++)
2785 		dev->dev_addr[i] = readb(&regs->PAR[i]);
2786 
2787 
2788 	drv_string = dev_driver_string(&pdev->dev);
2789 
2790 	velocity_get_options(&vptr->options, velocity_nics, drv_string);
2791 
2792 	/*
	 *	Mask out the options that cannot be set on this chip
2794 	 */
2795 
2796 	vptr->options.flags &= info->flags;
2797 
2798 	/*
	 *	Enable the chip-specific capabilities
2800 	 */
2801 
2802 	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2803 
2804 	vptr->wol_opts = vptr->options.wol_opts;
2805 	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2806 
2807 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2808 
2809 	dev->netdev_ops = &velocity_netdev_ops;
2810 	dev->ethtool_ops = &velocity_ethtool_ops;
2811 	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2812 
2813 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2814 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2815 		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2816 
2817 	ret = register_netdev(dev);
2818 	if (ret < 0)
2819 		goto err_iounmap;
2820 
2821 	if (!velocity_get_link(dev)) {
2822 		netif_carrier_off(dev);
2823 		vptr->mii_status |= VELOCITY_LINK_FAIL;
2824 	}
2825 
2826 	velocity_print_info(vptr);
2827 	pci_set_drvdata(pdev, dev);
2828 
2829 	/* and leave the chip powered down */
2830 
2831 	pci_set_power_state(pdev, PCI_D3hot);
2832 	velocity_nics++;
2833 out:
2834 	return ret;
2835 
2836 err_iounmap:
2837 	iounmap(regs);
2838 err_release_res:
2839 	pci_release_regions(pdev);
2840 err_disable:
2841 	pci_disable_device(pdev);
2842 err_free_dev:
2843 	free_netdev(dev);
2844 	goto out;
2845 }
2846 
2847 #ifdef CONFIG_PM
2848 /**
2849  *	wol_calc_crc		-	WOL CRC
 *	@size: size of the pattern in bytes
 *	@pattern: data pattern
 *	@mask_pattern: mask
 *
 *	Compute the wake on lan CRC hashes for the packet header
 *	we are interested in.
2855  */
2856 static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2857 {
2858 	u16 crc = 0xFFFF;
2859 	u8 mask;
2860 	int i, j;
2861 
2862 	for (i = 0; i < size; i++) {
2863 		mask = mask_pattern[i];
2864 
		/* Skip this byte if its mask is zero */
2866 		if (mask == 0x00)
2867 			continue;
2868 
2869 		for (j = 0; j < 8; j++) {
2870 			if ((mask & 0x01) == 0) {
2871 				mask >>= 1;
2872 				continue;
2873 			}
2874 			mask >>= 1;
2875 			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2876 		}
2877 	}
2878 	/*	Finally, invert the result once to get the correct data */
2879 	crc = ~crc;
2880 	return bitrev32(crc) >> 16;
2881 }
2882 
2883 /**
2884  *	velocity_set_wol	-	set up for wake on lan
2885  *	@vptr: velocity to set WOL status on
2886  *
2887  *	Set a card up for wake on lan either by unicast or by
2888  *	ARP packet.
2889  *
2890  *	FIXME: check static buffer is safe here
2891  */
2892 static int velocity_set_wol(struct velocity_info *vptr)
2893 {
2894 	struct mac_regs __iomem *regs = vptr->mac_regs;
2895 	enum speed_opt spd_dpx = vptr->options.spd_dpx;
2896 	static u8 buf[256];
2897 	int i;
2898 
2899 	static u32 mask_pattern[2][4] = {
2900 		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2901 		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
2902 	};
2903 
2904 	writew(0xFFFF, &regs->WOLCRClr);
2905 	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2906 	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2907 
2908 	/*
2909 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
2910 	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2911 	 */
2912 
2913 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2914 		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2915 
2916 	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2917 		struct arp_packet *arp = (struct arp_packet *) buf;
2918 		u16 crc;
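
		/*
		 *	wol_calc_crc() walks the pattern in 8-byte chunks, so
		 *	zero 7 extra bytes to round the buffer up to a whole
		 *	number of chunks.
		 */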
2919 		memset(buf, 0, sizeof(struct arp_packet) + 7);
2920 
2921 		for (i = 0; i < 4; i++)
2922 			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2923 
2924 		arp->type = htons(ETH_P_ARP);
2925 		arp->ar_op = htons(1);
2926 
2927 		memcpy(arp->ar_tip, vptr->ip_addr, 4);
2928 
2929 		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2930 				(u8 *) & mask_pattern[0][0]);
2931 
2932 		writew(crc, &regs->PatternCRC[0]);
2933 		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2934 	}
2935 
2936 	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2937 	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2938 
2939 	writew(0x0FFF, &regs->WOLSRClr);
2940 
2941 	if (spd_dpx == SPD_DPX_1000_FULL)
2942 		goto mac_done;
2943 
2944 	if (spd_dpx != SPD_DPX_AUTO)
2945 		goto advertise_done;
2946 
2947 	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2948 		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2949 			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2950 
2951 		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2952 	}
2953 
2954 	if (vptr->mii_status & VELOCITY_SPEED_1000)
2955 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2956 
2957 advertise_done:
2958 	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2959 
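	/*
	 *	Clear the forced-GMII bit and force full duplex (FCGMII off,
	 *	FCFDX on), presumably so wake frames can still be received at
	 *	the forced 10/100 full-duplex setting.
	 */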
2960 	{
2961 		u8 GCR;
2962 		GCR = readb(&regs->CHIPGCR);
2963 		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2964 		writeb(GCR, &regs->CHIPGCR);
2965 	}
2966 
2967 mac_done:
2968 	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2969 	/* Turn on SWPTAG just before entering power mode */
2970 	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2971 	/* Go to bed ..... */
2972 	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2973 
2974 	return 0;
2975 }
2976 
2977 /**
2978  *	velocity_save_context	-	save registers
2979  *	@vptr: velocity
2980  *	@context: buffer for stored context
2981  *
2982  *	Retrieve the current configuration from the velocity hardware
2983  *	and stash it in the context structure, for use by the context
2984  *	restore functions. This allows us to save things we need across
2985  *	power down states
2986  */
2987 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2988 {
2989 	struct mac_regs __iomem *regs = vptr->mac_regs;
2990 	u16 i;
2991 	u8 __iomem *ptr = (u8 __iomem *)regs;
2992 
2993 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
2994 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2995 
2996 	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
2997 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2998 
2999 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3000 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3001 
3002 }
3003 
3004 static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3005 {
3006 	struct net_device *dev = pci_get_drvdata(pdev);
3007 	struct velocity_info *vptr = netdev_priv(dev);
3008 	unsigned long flags;
3009 
3010 	if (!netif_running(vptr->dev))
3011 		return 0;
3012 
3013 	netif_device_detach(vptr->dev);
3014 
3015 	spin_lock_irqsave(&vptr->lock, flags);
3016 	pci_save_state(pdev);
3017 
3018 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3019 		velocity_get_ip(vptr);
3020 		velocity_save_context(vptr, &vptr->context);
3021 		velocity_shutdown(vptr);
3022 		velocity_set_wol(vptr);
3023 		pci_enable_wake(pdev, PCI_D3hot, 1);
3024 		pci_set_power_state(pdev, PCI_D3hot);
3025 	} else {
3026 		velocity_save_context(vptr, &vptr->context);
3027 		velocity_shutdown(vptr);
3028 		pci_disable_device(pdev);
3029 		pci_set_power_state(pdev, pci_choose_state(pdev, state));
3030 	}
3031 
3032 	spin_unlock_irqrestore(&vptr->lock, flags);
3033 	return 0;
3034 }
3035 
3036 /**
3037  *	velocity_restore_context	-	restore registers
3038  *	@vptr: velocity
3039  *	@context: buffer for stored context
3040  *
3041  *	Reload the register configuration from the velocity context
3042  *	created by velocity_save_context.
3043  */
3044 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3045 {
3046 	struct mac_regs __iomem *regs = vptr->mac_regs;
3047 	int i;
3048 	u8 __iomem *ptr = (u8 __iomem *)regs;
3049 
3050 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3051 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3052 
	/*
	 *	Restore CR1-CR3 but skip CR0: each CR byte register has a
	 *	Clear port 4 bytes after its Set port, so write the complement
	 *	to the Clear port and the saved value to the Set port.
	 */
3054 	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3055 		/* Clear */
3056 		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3057 		/* Set */
3058 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3059 	}
3060 
3061 	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3062 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3063 
3064 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3065 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3066 
3067 	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3068 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3069 }
3070 
3071 static int velocity_resume(struct pci_dev *pdev)
3072 {
3073 	struct net_device *dev = pci_get_drvdata(pdev);
3074 	struct velocity_info *vptr = netdev_priv(dev);
3075 	unsigned long flags;
3076 	int i;
3077 
3078 	if (!netif_running(vptr->dev))
3079 		return 0;
3080 
3081 	pci_set_power_state(pdev, PCI_D0);
3082 	pci_enable_wake(pdev, 0, 0);
3083 	pci_restore_state(pdev);
3084 
3085 	mac_wol_reset(vptr->mac_regs);
3086 
3087 	spin_lock_irqsave(&vptr->lock, flags);
3088 	velocity_restore_context(vptr, &vptr->context);
3089 	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3090 	mac_disable_int(vptr->mac_regs);
3091 
3092 	velocity_tx_srv(vptr);
3093 
3094 	for (i = 0; i < vptr->tx.numq; i++) {
3095 		if (vptr->tx.used[i])
3096 			mac_tx_queue_wake(vptr->mac_regs, i);
3097 	}
3098 
3099 	mac_enable_int(vptr->mac_regs);
3100 	spin_unlock_irqrestore(&vptr->lock, flags);
3101 	netif_device_attach(vptr->dev);
3102 
3103 	return 0;
3104 }
3105 #endif
3106 
3107 /*
 *	Definition for our device driver. The PCI layer
 *	uses this to handle all our card discovery and plugging
3110  */
3111 static struct pci_driver velocity_driver = {
3112 	.name		= VELOCITY_NAME,
3113 	.id_table	= velocity_id_table,
3114 	.probe		= velocity_found1,
3115 	.remove		= velocity_remove1,
3116 #ifdef CONFIG_PM
3117 	.suspend	= velocity_suspend,
3118 	.resume		= velocity_resume,
3119 #endif
3120 };
3121 
3122 
3123 /**
3124  *	velocity_ethtool_up	-	pre hook for ethtool
3125  *	@dev: network device
3126  *
3127  *	Called before an ethtool operation. We need to make sure the
3128  *	chip is out of D3 state before we poke at it.
3129  */
3130 static int velocity_ethtool_up(struct net_device *dev)
3131 {
3132 	struct velocity_info *vptr = netdev_priv(dev);
3133 	if (!netif_running(dev))
3134 		pci_set_power_state(vptr->pdev, PCI_D0);
3135 	return 0;
3136 }
3137 
3138 /**
3139  *	velocity_ethtool_down	-	post hook for ethtool
3140  *	@dev: network device
3141  *
 *	Called after an ethtool operation. Put the chip back into D3
 *	state if the interface isn't running.
3144  */
3145 static void velocity_ethtool_down(struct net_device *dev)
3146 {
3147 	struct velocity_info *vptr = netdev_priv(dev);
3148 	if (!netif_running(dev))
3149 		pci_set_power_state(vptr->pdev, PCI_D3hot);
3150 }
3151 
3152 static int velocity_get_settings(struct net_device *dev,
3153 				 struct ethtool_cmd *cmd)
3154 {
3155 	struct velocity_info *vptr = netdev_priv(dev);
3156 	struct mac_regs __iomem *regs = vptr->mac_regs;
3157 	u32 status;
3158 	status = check_connection_type(vptr->mac_regs);
3159 
3160 	cmd->supported = SUPPORTED_TP |
3161 			SUPPORTED_Autoneg |
3162 			SUPPORTED_10baseT_Half |
3163 			SUPPORTED_10baseT_Full |
3164 			SUPPORTED_100baseT_Half |
3165 			SUPPORTED_100baseT_Full |
3166 			SUPPORTED_1000baseT_Half |
3167 			SUPPORTED_1000baseT_Full;
3168 
3169 	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3170 	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3171 		cmd->advertising |=
3172 			ADVERTISED_10baseT_Half |
3173 			ADVERTISED_10baseT_Full |
3174 			ADVERTISED_100baseT_Half |
3175 			ADVERTISED_100baseT_Full |
3176 			ADVERTISED_1000baseT_Half |
3177 			ADVERTISED_1000baseT_Full;
3178 	} else {
3179 		switch (vptr->options.spd_dpx) {
3180 		case SPD_DPX_1000_FULL:
3181 			cmd->advertising |= ADVERTISED_1000baseT_Full;
3182 			break;
3183 		case SPD_DPX_100_HALF:
3184 			cmd->advertising |= ADVERTISED_100baseT_Half;
3185 			break;
3186 		case SPD_DPX_100_FULL:
3187 			cmd->advertising |= ADVERTISED_100baseT_Full;
3188 			break;
3189 		case SPD_DPX_10_HALF:
3190 			cmd->advertising |= ADVERTISED_10baseT_Half;
3191 			break;
3192 		case SPD_DPX_10_FULL:
3193 			cmd->advertising |= ADVERTISED_10baseT_Full;
3194 			break;
3195 		default:
3196 			break;
3197 		}
3198 	}
3199 
3200 	if (status & VELOCITY_SPEED_1000)
3201 		ethtool_cmd_speed_set(cmd, SPEED_1000);
3202 	else if (status & VELOCITY_SPEED_100)
3203 		ethtool_cmd_speed_set(cmd, SPEED_100);
3204 	else
3205 		ethtool_cmd_speed_set(cmd, SPEED_10);
3206 
3207 	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3208 	cmd->port = PORT_TP;
3209 	cmd->transceiver = XCVR_INTERNAL;
3210 	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3211 
3212 	if (status & VELOCITY_DUPLEX_FULL)
3213 		cmd->duplex = DUPLEX_FULL;
3214 	else
3215 		cmd->duplex = DUPLEX_HALF;
3216 
3217 	return 0;
3218 }
3219 
3220 static int velocity_set_settings(struct net_device *dev,
3221 				 struct ethtool_cmd *cmd)
3222 {
3223 	struct velocity_info *vptr = netdev_priv(dev);
3224 	u32 speed = ethtool_cmd_speed(cmd);
3225 	u32 curr_status;
3226 	u32 new_status = 0;
3227 	int ret = 0;
3228 
3229 	curr_status = check_connection_type(vptr->mac_regs);
3230 	curr_status &= (~VELOCITY_LINK_FAIL);
3231 
3232 	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3233 	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3234 	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3235 	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3236 	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3237 
3238 	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3239 	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3240 		ret = -EINVAL;
3241 	} else {
3242 		enum speed_opt spd_dpx;
3243 
3244 		if (new_status & VELOCITY_AUTONEG_ENABLE)
3245 			spd_dpx = SPD_DPX_AUTO;
3246 		else if ((new_status & VELOCITY_SPEED_1000) &&
3247 			 (new_status & VELOCITY_DUPLEX_FULL)) {
3248 			spd_dpx = SPD_DPX_1000_FULL;
3249 		} else if (new_status & VELOCITY_SPEED_100)
3250 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3251 				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3252 		else if (new_status & VELOCITY_SPEED_10)
3253 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3254 				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3255 		else
3256 			return -EOPNOTSUPP;
3257 
3258 		vptr->options.spd_dpx = spd_dpx;
3259 
3260 		velocity_set_media_mode(vptr, new_status);
3261 	}
3262 
3263 	return ret;
3264 }
3265 
3266 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3267 {
3268 	struct velocity_info *vptr = netdev_priv(dev);
3269 	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3270 	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3271 	strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
3272 }
3273 
3274 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3275 {
3276 	struct velocity_info *vptr = netdev_priv(dev);
3277 	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3278 	wol->wolopts |= WAKE_MAGIC;
3279 	/*
3280 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3281 		   wol.wolopts|=WAKE_PHY;
3282 			 */
3283 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3284 		wol->wolopts |= WAKE_UCAST;
3285 	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3286 		wol->wolopts |= WAKE_ARP;
3287 	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3288 }
3289 
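/*
 *	Wake-on-LAN can be configured from user space with ethtool, e.g.
 *	"ethtool -s eth0 wol g" to enable magic-packet wake (illustrative).
 */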
3290 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3291 {
3292 	struct velocity_info *vptr = netdev_priv(dev);
3293 
3294 	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3295 		return -EFAULT;
3296 	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3297 
3298 	/*
3299 	   if (wol.wolopts & WAKE_PHY) {
3300 	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3301 	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3302 	   }
3303 	 */
3304 
3305 	if (wol->wolopts & WAKE_MAGIC) {
3306 		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3307 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3308 	}
3309 	if (wol->wolopts & WAKE_UCAST) {
3310 		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3311 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3312 	}
3313 	if (wol->wolopts & WAKE_ARP) {
3314 		vptr->wol_opts |= VELOCITY_WOL_ARP;
3315 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3316 	}
3317 	memcpy(vptr->wol_passwd, wol->sopass, 6);
3318 	return 0;
3319 }
3320 
3321 static u32 velocity_get_msglevel(struct net_device *dev)
3322 {
3323 	return msglevel;
3324 }
3325 
3326 static void velocity_set_msglevel(struct net_device *dev, u32 value)
3327 {
3328 	 msglevel = value;
3329 }
3330 
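/*
 *	The queue timer registers hold a 6-bit count in the low bits and a
 *	two-bit multiplier selector (1, 4, 16 or 64) in bits 6-7.
 */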
3331 static int get_pending_timer_val(int val)
3332 {
3333 	int mult_bits = val >> 6;
3334 	int mult = 1;
3335 
3336 	switch (mult_bits)
3337 	{
3338 	case 1:
3339 		mult = 4; break;
3340 	case 2:
3341 		mult = 16; break;
3342 	case 3:
3343 		mult = 64; break;
3344 	case 0:
3345 	default:
3346 		break;
3347 	}
3348 
3349 	return (val & 0x3f) * mult;
3350 }
3351 
3352 static void set_pending_timer_val(int *val, u32 us)
3353 {
3354 	u8 mult = 0;
3355 	u8 shift = 0;
3356 
	if (us >= 0x3f) {
		mult = 1; /* multiply by 4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2; /* multiply by 16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3; /* multiply by 64 */
		shift = 6;
	}
3369 
3370 	*val = (mult << 6) | ((us >> shift) & 0x3f);
3371 }
3372 
3373 
3374 static int velocity_get_coalesce(struct net_device *dev,
3375 		struct ethtool_coalesce *ecmd)
3376 {
3377 	struct velocity_info *vptr = netdev_priv(dev);
3378 
3379 	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3380 	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3381 
3382 	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3383 	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3384 
3385 	return 0;
3386 }
3387 
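/*
 *	Interrupt coalescing can be tuned from user space with ethtool,
 *	for example (values purely illustrative):
 *
 *	    ethtool -C eth0 rx-usecs 30 rx-frames 25 tx-usecs 30 tx-frames 25
 */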
3388 static int velocity_set_coalesce(struct net_device *dev,
3389 		struct ethtool_coalesce *ecmd)
3390 {
3391 	struct velocity_info *vptr = netdev_priv(dev);
3392 	int max_us = 0x3f * 64;
3393 	unsigned long flags;
3394 
	/* The timer value is a 6-bit count scaled by at most 64 */
3396 	if (ecmd->tx_coalesce_usecs > max_us)
3397 		return -EINVAL;
3398 	if (ecmd->rx_coalesce_usecs > max_us)
3399 		return -EINVAL;
3400 
3401 	if (ecmd->tx_max_coalesced_frames > 0xff)
3402 		return -EINVAL;
3403 	if (ecmd->rx_max_coalesced_frames > 0xff)
3404 		return -EINVAL;
3405 
3406 	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3407 	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3408 
3409 	set_pending_timer_val(&vptr->options.rxqueue_timer,
3410 			ecmd->rx_coalesce_usecs);
3411 	set_pending_timer_val(&vptr->options.txqueue_timer,
3412 			ecmd->tx_coalesce_usecs);
3413 
3414 	/* Setup the interrupt suppression and queue timers */
3415 	spin_lock_irqsave(&vptr->lock, flags);
3416 	mac_disable_int(vptr->mac_regs);
3417 	setup_adaptive_interrupts(vptr);
3418 	setup_queue_timers(vptr);
3419 
3420 	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3421 	mac_clear_isr(vptr->mac_regs);
3422 	mac_enable_int(vptr->mac_regs);
3423 	spin_unlock_irqrestore(&vptr->lock, flags);
3424 
3425 	return 0;
3426 }
3427 
3428 static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3429 	"rx_all",
3430 	"rx_ok",
3431 	"tx_ok",
3432 	"rx_error",
3433 	"rx_runt_ok",
3434 	"rx_runt_err",
3435 	"rx_64",
3436 	"tx_64",
3437 	"rx_65_to_127",
3438 	"tx_65_to_127",
3439 	"rx_128_to_255",
3440 	"tx_128_to_255",
3441 	"rx_256_to_511",
3442 	"tx_256_to_511",
3443 	"rx_512_to_1023",
3444 	"tx_512_to_1023",
3445 	"rx_1024_to_1518",
3446 	"tx_1024_to_1518",
3447 	"tx_ether_collisions",
3448 	"rx_crc_errors",
3449 	"rx_jumbo",
3450 	"tx_jumbo",
3451 	"rx_mac_control_frames",
3452 	"tx_mac_control_frames",
3453 	"rx_frame_alignement_errors",
3454 	"rx_long_ok",
3455 	"rx_long_err",
3456 	"tx_sqe_errors",
3457 	"rx_no_buf",
3458 	"rx_symbol_errors",
3459 	"in_range_length_errors",
3460 	"late_collisions"
3461 };
3462 
3463 static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3464 {
3465 	switch (sset) {
3466 	case ETH_SS_STATS:
3467 		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3468 		break;
3469 	}
3470 }
3471 
3472 static int velocity_get_sset_count(struct net_device *dev, int sset)
3473 {
3474 	switch (sset) {
3475 	case ETH_SS_STATS:
3476 		return ARRAY_SIZE(velocity_gstrings);
3477 	default:
3478 		return -EOPNOTSUPP;
3479 	}
3480 }
3481 
3482 static void velocity_get_ethtool_stats(struct net_device *dev,
3483 				       struct ethtool_stats *stats, u64 *data)
3484 {
3485 	if (netif_running(dev)) {
3486 		struct velocity_info *vptr = netdev_priv(dev);
3487 		u32 *p = vptr->mib_counter;
3488 		int i;
3489 
3490 		spin_lock_irq(&vptr->lock);
3491 		velocity_update_hw_mibs(vptr);
3492 		spin_unlock_irq(&vptr->lock);
3493 
3494 		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3495 			*data++ = *p++;
3496 	}
3497 }
3498 
3499 static const struct ethtool_ops velocity_ethtool_ops = {
3500 	.get_settings		= velocity_get_settings,
3501 	.set_settings		= velocity_set_settings,
3502 	.get_drvinfo		= velocity_get_drvinfo,
3503 	.get_wol		= velocity_ethtool_get_wol,
3504 	.set_wol		= velocity_ethtool_set_wol,
3505 	.get_msglevel		= velocity_get_msglevel,
3506 	.set_msglevel		= velocity_set_msglevel,
3507 	.get_link		= velocity_get_link,
3508 	.get_strings		= velocity_get_strings,
3509 	.get_sset_count		= velocity_get_sset_count,
3510 	.get_ethtool_stats	= velocity_get_ethtool_stats,
3511 	.get_coalesce		= velocity_get_coalesce,
3512 	.set_coalesce		= velocity_set_coalesce,
3513 	.begin			= velocity_ethtool_up,
3514 	.complete		= velocity_ethtool_down
3515 };
3516 
3517 #if defined(CONFIG_PM) && defined(CONFIG_INET)
3518 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3519 {
3520 	struct in_ifaddr *ifa = ptr;
3521 	struct net_device *dev = ifa->ifa_dev->dev;
3522 
3523 	if (dev_net(dev) == &init_net &&
3524 	    dev->netdev_ops == &velocity_netdev_ops)
3525 		velocity_get_ip(netdev_priv(dev));
3526 
3527 	return NOTIFY_DONE;
3528 }
3529 
3530 static struct notifier_block velocity_inetaddr_notifier = {
3531 	.notifier_call	= velocity_netdev_event,
3532 };
3533 
3534 static void velocity_register_notifier(void)
3535 {
3536 	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3537 }
3538 
3539 static void velocity_unregister_notifier(void)
3540 {
3541 	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3542 }
3543 
3544 #else
3545 
3546 #define velocity_register_notifier()	do {} while (0)
3547 #define velocity_unregister_notifier()	do {} while (0)
3548 
3549 #endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3550 
3551 /**
3552  *	velocity_init_module	-	load time function
3553  *
3554  *	Called when the velocity module is loaded. The PCI driver
3555  *	is registered with the PCI layer, and in turn will call
3556  *	the probe functions for each velocity adapter installed
3557  *	in the system.
3558  */
3559 static int __init velocity_init_module(void)
3560 {
3561 	int ret;
3562 
3563 	velocity_register_notifier();
3564 	ret = pci_register_driver(&velocity_driver);
3565 	if (ret < 0)
3566 		velocity_unregister_notifier();
3567 	return ret;
3568 }
3569 
3570 /**
3571  *	velocity_cleanup	-	module unload
3572  *
 *	Called when the velocity module is unloaded. It cleans up the
 *	notifiers and unregisters the PCI driver interface for this
 *	hardware, which in turn cleans up all discovered interfaces
 *	before returning from the function.
3577  */
3578 static void __exit velocity_cleanup_module(void)
3579 {
3580 	velocity_unregister_notifier();
3581 	pci_unregister_driver(&velocity_driver);
3582 }
3583 
3584 module_init(velocity_init_module);
3585 module_exit(velocity_cleanup_module);
3586