xref: /openbmc/linux/drivers/net/fddi/defxx.c (revision 84d517f3)
1 /*
2  * File Name:
3  *   defxx.c
4  *
5  * Copyright Information:
6  *   Copyright Digital Equipment Corporation 1996.
7  *
8  *   This software may be used and distributed according to the terms of
9  *   the GNU General Public License, incorporated herein by reference.
10  *
11  * Abstract:
12  *   A Linux device driver supporting the Digital Equipment Corporation
13  *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
14  *   adapters include:
15  *
16  *		DEC FDDIcontroller/TURBOchannel (DEFTA)
17  *		DEC FDDIcontroller/EISA         (DEFEA)
18  *		DEC FDDIcontroller/PCI          (DEFPA)
19  *
20  * The original author:
21  *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
22  *
23  * Maintainers:
24  *   macro	Maciej W. Rozycki <macro@linux-mips.org>
25  *
26  * Credits:
27  *   I'd like to thank Patricia Cross for helping me get started with
28  *   Linux, David Davies for a lot of help upgrading and configuring
29  *   my development system and for answering many OS and driver
30  *   development questions, and Alan Cox for recommendations and
31  *   integration help on getting FDDI support into Linux.  LVS
32  *
33  * Driver Architecture:
34  *   The driver architecture is largely based on previous driver work
35  *   for other operating systems.  The upper edge interface and
36  *   functions were largely taken from existing Linux device drivers
37  *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38  *   driver.
39  *
40  *   Adapter Probe -
41  *		The driver scans for supported EISA adapters by reading the
42  *		SLOT ID register for each EISA slot and making a match
43  *		against the expected value.
44  *
45  *   Bus-Specific Initialization -
46  *		This driver currently supports both EISA and PCI controller
47  *		families.  While the custom DMA chip and FDDI logic is similar
48  *		or identical, the bus logic is very different.  After
49  *		initialization, the	only bus-specific differences is in how the
50  *		driver enables and disables interrupts.  Other than that, the
51  *		run-time critical code behaves the same on both families.
52  *		It's important to note that both adapter families are configured
53  *		to I/O map, rather than memory map, the adapter registers.
54  *
55  *   Driver Open/Close -
56  *		In the driver open routine, the driver ISR (interrupt service
57  *		routine) is registered and the adapter is brought to an
58  *		operational state.  In the driver close routine, the opposite
59  *		occurs; the driver ISR is deregistered and the adapter is
60  *		brought to a safe, but closed state.  Users may use consecutive
61  *		commands to bring the adapter up and down as in the following
62  *		example:
63  *					ifconfig fddi0 up
64  *					ifconfig fddi0 down
65  *					ifconfig fddi0 up
66  *
67  *   Driver Shutdown -
68  *		Apparently, there is no shutdown or halt routine support under
69  *		Linux.  This routine would be called during "reboot" or
70  *		"shutdown" to allow the driver to place the adapter in a safe
71  *		state before a warm reboot occurs.  To be really safe, the user
72  *		should close the adapter before shutdown (eg. ifconfig fddi0 down)
73  *		to ensure that the adapter DMA engine is taken off-line.  However,
74  *		the current driver code anticipates this problem and always issues
75  *		a soft reset of the adapter	at the beginning of driver initialization.
76  *		A future driver enhancement in this area may occur in 2.1.X where
77  *		Alan indicated that a shutdown handler may be implemented.
78  *
79  *   Interrupt Service Routine -
80  *		The driver supports shared interrupts, so the ISR is registered for
81  *		each board with the appropriate flag and the pointer to that board's
82  *		device structure.  This provides the context during interrupt
83  *		processing to support shared interrupts and multiple boards.
84  *
85  *		Interrupt enabling/disabling can occur at many levels.  At the host
86  *		end, you can disable system interrupts, or disable interrupts at the
87  *		PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
88  *		have a bus-logic chip interrupt enable/disable as well as a DMA
89  *		controller interrupt enable/disable.
90  *
91  *		The driver currently enables and disables adapter interrupts at the
92  *		bus-logic chip and assumes that Linux will take care of clearing or
93  *		acknowledging any host-based interrupt chips.
94  *
95  *   Control Functions -
96  *		Control functions are those used to support functions such as adding
97  *		or deleting multicast addresses, enabling or disabling packet
98  *		reception filters, or other custom/proprietary commands.  Presently,
99  *		the driver supports the "get statistics", "set multicast list", and
100  *		"set mac address" functions defined by Linux.  A list of possible
101  *		enhancements include:
102  *
103  *				- Custom ioctl interface for executing port interface commands
104  *				- Custom ioctl interface for adding unicast addresses to
105  *				  adapter CAM (to support bridge functions).
106  *				- Custom ioctl interface for supporting firmware upgrades.
107  *
108  *   Hardware (port interface) Support Routines -
109  *		The driver function names that start with "dfx_hw_" represent
110  *		low-level port interface routines that are called frequently.  They
111  *		include issuing a DMA or port control command to the adapter,
112  *		resetting the adapter, or reading the adapter state.  Since the
113  *		driver initialization and run-time code must make calls into the
114  *		port interface, these routines were written to be as generic and
115  *		usable as possible.
116  *
117  *   Receive Path -
118  *		The adapter DMA engine supports a 256 entry receive descriptor block
119  *		of which up to 255 entries can be used at any given time.  The
120  *		architecture is a standard producer, consumer, completion model in
121  *		which the driver "produces" receive buffers to the adapter, the
122  *		adapter "consumes" the receive buffers by DMAing incoming packet data,
123  *		and the driver "completes" the receive buffers by servicing the
124  *		incoming packet, then "produces" a new buffer and starts the cycle
125  *		again.  Receive buffers can be fragmented in up to 16 fragments
126  *		(descriptor	entries).  For simplicity, this driver posts
127  *		single-fragment receive buffers of 4608 bytes, then allocates a
128  *		sk_buff, copies the data, then reposts the buffer.  To reduce CPU
129  *		utilization, a better approach would be to pass up the receive
130  *		buffer (no extra copy) then allocate and post a replacement buffer.
131  *		This is a performance enhancement that should be looked into at
132  *		some point.
133  *
134  *   Transmit Path -
135  *		Like the receive path, the adapter DMA engine supports a 256 entry
136  *		transmit descriptor block of which up to 255 entries can be used at
137  *		any	given time.  Transmit buffers can be fragmented	in up to 255
138  *		fragments (descriptor entries).  This driver always posts one
139  *		fragment per transmit packet request.
140  *
141  *		The fragment contains the entire packet from FC to end of data.
142  *		Before posting the buffer to the adapter, the driver sets a three-byte
143  *		packet request header (PRH) which is required by the Motorola MAC chip
144  *		used on the adapters.  The PRH tells the MAC the type of token to
145  *		receive/send, whether or not to generate and append the CRC, whether
146  *		synchronous or asynchronous framing is used, etc.  Since the PRH
147  *		definition is not necessarily consistent across all FDDI chipsets,
148  *		the driver, rather than the common FDDI packet handler routines,
149  *		sets these bytes.
150  *
151  *		To reduce the amount of descriptor fetches needed per transmit request,
152  *		the driver takes advantage of the fact that there are at least three
153  *		bytes available before the skb->data field on the outgoing transmit
154  *		request.  This is guaranteed by having fddi_setup() in net_init.c set
155  *		dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
156  *		header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
157  *		bytes which we'll use to store the PRH.
158  *
159  *		There's a subtle advantage to adding these pad bytes to the
160  *		hard_header_len, it ensures that the data portion of the packet for
161  *		an 802.2 SNAP frame is longword aligned.  Other FDDI driver
162  *		implementations may not need the extra padding and can start copying
163  *		or DMAing directly from the FC byte which starts at skb->data.  Should
164  *		another driver implementation need ADDITIONAL padding, the net_init.c
165  *		module should be updated and dev->hard_header_len should be increased.
166  *		NOTE: To maintain the alignment on the data portion of the packet,
167  *		dev->hard_header_len should always be evenly divisible by 4 and at
168  *		least 24 bytes in size.
169  *
170  * Modification History:
171  *		Date		Name	Description
172  *		16-Aug-96	LVS		Created.
173  *		20-Aug-96	LVS		Updated dfx_probe so that version information
174  *							string is only displayed if 1 or more cards are
175  *							found.  Changed dfx_rcv_queue_process to copy
176  *							3 NULL bytes before FC to ensure that data is
177  *							longword aligned in receive buffer.
178  *		09-Sep-96	LVS		Updated dfx_ctl_set_multicast_list to enable
179  *							LLC group promiscuous mode if multicast list
180  *							is too large.  LLC individual/group promiscuous
181  *							mode is now disabled if IFF_PROMISC flag not set.
182  *							dfx_xmt_queue_pkt no longer checks for NULL skb
183  *							on Alan Cox recommendation.  Added node address
184  *							override support.
185  *		12-Sep-96	LVS		Reset current address to factory address during
186  *							device open.  Updated transmit path to post a
187  *							single fragment which includes PRH->end of data.
188  *		Mar 2000	AC		Did various cleanups for 2.3.x
189  *		Jun 2000	jgarzik		PCI and resource alloc cleanups
190  *		Jul 2000	tjeerd		Much cleanup and some bug fixes
191  *		Sep 2000	tjeerd		Fix leak on unload, cosmetic code cleanup
192  *		Feb 2001			Skb allocation fixes
193  *		Feb 2001	davej		PCI enable cleanups.
194  *		04 Aug 2003	macro		Converted to the DMA API.
195  *		14 Aug 2004	macro		Fix device names reported.
196  *		14 Jun 2005	macro		Use irqreturn_t.
197  *		23 Oct 2006	macro		Big-endian host support.
198  *		14 Dec 2006	macro		TURBOchannel support.
199  */
200 
201 /* Include files */
202 #include <linux/bitops.h>
203 #include <linux/compiler.h>
204 #include <linux/delay.h>
205 #include <linux/dma-mapping.h>
206 #include <linux/eisa.h>
207 #include <linux/errno.h>
208 #include <linux/fddidevice.h>
209 #include <linux/interrupt.h>
210 #include <linux/ioport.h>
211 #include <linux/kernel.h>
212 #include <linux/module.h>
213 #include <linux/netdevice.h>
214 #include <linux/pci.h>
215 #include <linux/skbuff.h>
216 #include <linux/slab.h>
217 #include <linux/string.h>
218 #include <linux/tc.h>
219 
220 #include <asm/byteorder.h>
221 #include <asm/io.h>
222 
223 #include "defxx.h"
224 
225 /* Version information string should be updated prior to each new release!  */
226 #define DRV_NAME "defxx"
227 #define DRV_VERSION "v1.10"
228 #define DRV_RELDATE "2006/12/14"
229 
230 static char version[] =
231 	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
232 	"  Lawrence V. Stefani and others\n";
233 
234 #define DYNAMIC_BUFFERS 1
235 
236 #define SKBUFF_RX_COPYBREAK 200
237 /*
238  * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
239  * alignment for compatibility with old EISA boards.
240  */
241 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
242 
243 #ifdef CONFIG_EISA
244 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
245 #else
246 #define DFX_BUS_EISA(dev) 0
247 #endif
248 
249 #ifdef CONFIG_TC
250 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
251 #else
252 #define DFX_BUS_TC(dev) 0
253 #endif
254 
255 #ifdef CONFIG_DEFXX_MMIO
256 #define DFX_MMIO 1
257 #else
258 #define DFX_MMIO 0
259 #endif
260 
261 /* Define module-wide (static) routines */
262 
263 static void		dfx_bus_init(struct net_device *dev);
264 static void		dfx_bus_uninit(struct net_device *dev);
265 static void		dfx_bus_config_check(DFX_board_t *bp);
266 
267 static int		dfx_driver_init(struct net_device *dev,
268 					const char *print_name,
269 					resource_size_t bar_start);
270 static int		dfx_adap_init(DFX_board_t *bp, int get_buffers);
271 
272 static int		dfx_open(struct net_device *dev);
273 static int		dfx_close(struct net_device *dev);
274 
275 static void		dfx_int_pr_halt_id(DFX_board_t *bp);
276 static void		dfx_int_type_0_process(DFX_board_t *bp);
277 static void		dfx_int_common(struct net_device *dev);
278 static irqreturn_t	dfx_interrupt(int irq, void *dev_id);
279 
280 static struct		net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
281 static void		dfx_ctl_set_multicast_list(struct net_device *dev);
282 static int		dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
283 static int		dfx_ctl_update_cam(DFX_board_t *bp);
284 static int		dfx_ctl_update_filters(DFX_board_t *bp);
285 
286 static int		dfx_hw_dma_cmd_req(DFX_board_t *bp);
287 static int		dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32	command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
288 static void		dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
289 static int		dfx_hw_adap_state_rd(DFX_board_t *bp);
290 static int		dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
291 
292 static int		dfx_rcv_init(DFX_board_t *bp, int get_buffers);
293 static void		dfx_rcv_queue_process(DFX_board_t *bp);
294 static void		dfx_rcv_flush(DFX_board_t *bp);
295 
296 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
297 				     struct net_device *dev);
298 static int		dfx_xmt_done(DFX_board_t *bp);
299 static void		dfx_xmt_flush(DFX_board_t *bp);
300 
301 /* Define module-wide (static) variables */
302 
303 static struct pci_driver dfx_pci_driver;
304 static struct eisa_driver dfx_eisa_driver;
305 static struct tc_driver dfx_tc_driver;
306 
307 
308 /*
309  * =======================
310  * = dfx_port_write_long =
311  * = dfx_port_read_long  =
312  * =======================
313  *
314  * Overview:
315  *   Routines for reading and writing values from/to adapter
316  *
317  * Returns:
318  *   None
319  *
320  * Arguments:
321  *   bp		- pointer to board information
322  *   offset	- register offset from base I/O address
323  *   data	- for dfx_port_write_long, this is a value to write;
324  *		  for dfx_port_read_long, this is a pointer to store
325  *		  the read value
326  *
327  * Functional Description:
328  *   These routines perform the correct operation to read or write
329  *   the adapter register.
330  *
331  *   EISA port block base addresses are based on the slot number in which the
332  *   controller is installed.  For example, if the EISA controller is installed
333  *   in slot 4, the port block base address is 0x4000.  If the controller is
334  *   installed in slot 2, the port block base address is 0x2000, and so on.
335  *   This port block can be used to access PDQ, ESIC, and DEFEA on-board
336  *   registers using the register offsets defined in DEFXX.H.
337  *
338  *   PCI port block base addresses are assigned by the PCI BIOS or system
339  *   firmware.  There is one 128 byte port block which can be accessed.  It
340  *   allows for I/O mapping of both PDQ and PFI registers using the register
341  *   offsets defined in DEFXX.H.
342  *
343  * Return Codes:
344  *   None
345  *
346  * Assumptions:
347  *   bp->base is a valid base I/O address for this adapter.
348  *   offset is a valid register offset for this adapter.
349  *
350  * Side Effects:
351  *   Rather than produce macros for these functions, these routines
352  *   are defined using "inline" to ensure that the compiler will
353  *   generate inline code and not waste a procedure call and return.
354  *   This provides all the benefits of macros, but with the
355  *   advantage of strict data type checking.
356  */
357 
358 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
359 {
360 	writel(data, bp->base.mem + offset);
361 	mb();
362 }
363 
364 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
365 {
366 	outl(data, bp->base.port + offset);
367 }
368 
369 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
370 {
371 	struct device __maybe_unused *bdev = bp->bus_dev;
372 	int dfx_bus_tc = DFX_BUS_TC(bdev);
373 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
374 
375 	if (dfx_use_mmio)
376 		dfx_writel(bp, offset, data);
377 	else
378 		dfx_outl(bp, offset, data);
379 }
380 
381 
382 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
383 {
384 	mb();
385 	*data = readl(bp->base.mem + offset);
386 }
387 
388 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
389 {
390 	*data = inl(bp->base.port + offset);
391 }
392 
393 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
394 {
395 	struct device __maybe_unused *bdev = bp->bus_dev;
396 	int dfx_bus_tc = DFX_BUS_TC(bdev);
397 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
398 
399 	if (dfx_use_mmio)
400 		dfx_readl(bp, offset, data);
401 	else
402 		dfx_inl(bp, offset, data);
403 }
404 
405 
406 /*
407  * ================
408  * = dfx_get_bars =
409  * ================
410  *
411  * Overview:
412  *   Retrieves the address range used to access control and status
413  *   registers.
414  *
415  * Returns:
416  *   None
417  *
418  * Arguments:
419  *   bdev	- pointer to device information
420  *   bar_start	- pointer to store the start address
421  *   bar_len	- pointer to store the length of the area
422  *
423  * Assumptions:
424  *   I am sure there are some.
425  *
426  * Side Effects:
427  *   None
428  */
429 static void dfx_get_bars(struct device *bdev,
430 			 resource_size_t *bar_start, resource_size_t *bar_len)
431 {
432 	int dfx_bus_pci = dev_is_pci(bdev);
433 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
434 	int dfx_bus_tc = DFX_BUS_TC(bdev);
435 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
436 
437 	if (dfx_bus_pci) {
438 		int num = dfx_use_mmio ? 0 : 1;
439 
440 		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
441 		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
442 	}
443 	if (dfx_bus_eisa) {
444 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
445 		resource_size_t bar;
446 
447 		if (dfx_use_mmio) {
448 			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
449 			bar <<= 8;
450 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
451 			bar <<= 8;
452 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
453 			bar <<= 16;
454 			*bar_start = bar;
455 			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
456 			bar <<= 8;
457 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
458 			bar <<= 8;
459 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
460 			bar <<= 16;
461 			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
462 		} else {
463 			*bar_start = base_addr;
464 			*bar_len = PI_ESIC_K_CSR_IO_LEN;
465 		}
466 	}
467 	if (dfx_bus_tc) {
468 		*bar_start = to_tc_dev(bdev)->resource.start +
469 			     PI_TC_K_CSR_OFFSET;
470 		*bar_len = PI_TC_K_CSR_LEN;
471 	}
472 }
473 
474 static const struct net_device_ops dfx_netdev_ops = {
475 	.ndo_open		= dfx_open,
476 	.ndo_stop		= dfx_close,
477 	.ndo_start_xmit		= dfx_xmt_queue_pkt,
478 	.ndo_get_stats		= dfx_ctl_get_stats,
479 	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
480 	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
481 };
482 
483 /*
484  * ================
485  * = dfx_register =
486  * ================
487  *
488  * Overview:
489  *   Initializes a supported FDDI controller
490  *
491  * Returns:
492  *   Condition code
493  *
494  * Arguments:
495  *   bdev - pointer to device information
496  *
497  * Functional Description:
498  *
499  * Return Codes:
500  *   0		 - This device (fddi0, fddi1, etc) configured successfully
501  *   -EBUSY      - Failed to get resources, or dfx_driver_init failed.
502  *
503  * Assumptions:
504  *   It compiles so it should work :-( (PCI cards do :-)
505  *
506  * Side Effects:
507  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
508  *   initialized and the board resources are read and stored in
509  *   the device structure.
510  */
511 static int dfx_register(struct device *bdev)
512 {
513 	static int version_disp;
514 	int dfx_bus_pci = dev_is_pci(bdev);
515 	int dfx_bus_tc = DFX_BUS_TC(bdev);
516 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
517 	const char *print_name = dev_name(bdev);
518 	struct net_device *dev;
519 	DFX_board_t	  *bp;			/* board pointer */
520 	resource_size_t bar_start = 0;		/* pointer to port */
521 	resource_size_t bar_len = 0;		/* resource length */
522 	int alloc_size;				/* total buffer size used */
523 	struct resource *region;
524 	int err = 0;
525 
526 	if (!version_disp) {	/* display version info if adapter is found */
527 		version_disp = 1;	/* set display flag to TRUE so that */
528 		printk(version);	/* we only display this string ONCE */
529 	}
530 
531 	dev = alloc_fddidev(sizeof(*bp));
532 	if (!dev) {
533 		printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
534 		       print_name);
535 		return -ENOMEM;
536 	}
537 
538 	/* Enable PCI device. */
539 	if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
540 		printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
541 		       print_name);
542 		goto err_out;
543 	}
544 
545 	SET_NETDEV_DEV(dev, bdev);
546 
547 	bp = netdev_priv(dev);
548 	bp->bus_dev = bdev;
549 	dev_set_drvdata(bdev, dev);
550 
551 	dfx_get_bars(bdev, &bar_start, &bar_len);
552 
553 	if (dfx_use_mmio)
554 		region = request_mem_region(bar_start, bar_len, print_name);
555 	else
556 		region = request_region(bar_start, bar_len, print_name);
557 	if (!region) {
558 		printk(KERN_ERR "%s: Cannot reserve I/O resource "
559 		       "0x%lx @ 0x%lx, aborting\n",
560 		       print_name, (long)bar_len, (long)bar_start);
561 		err = -EBUSY;
562 		goto err_out_disable;
563 	}
564 
565 	/* Set up I/O base address. */
566 	if (dfx_use_mmio) {
567 		bp->base.mem = ioremap_nocache(bar_start, bar_len);
568 		if (!bp->base.mem) {
569 			printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
570 			err = -ENOMEM;
571 			goto err_out_region;
572 		}
573 	} else {
574 		bp->base.port = bar_start;
575 		dev->base_addr = bar_start;
576 	}
577 
578 	/* Initialize new device structure */
579 	dev->netdev_ops			= &dfx_netdev_ops;
580 
581 	if (dfx_bus_pci)
582 		pci_set_master(to_pci_dev(bdev));
583 
584 	if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
585 		err = -ENODEV;
586 		goto err_out_unmap;
587 	}
588 
589 	err = register_netdev(dev);
590 	if (err)
591 		goto err_out_kfree;
592 
593 	printk("%s: registered as %s\n", print_name, dev->name);
594 	return 0;
595 
596 err_out_kfree:
597 	alloc_size = sizeof(PI_DESCR_BLOCK) +
598 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
599 #ifndef DYNAMIC_BUFFERS
600 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
601 #endif
602 		     sizeof(PI_CONSUMER_BLOCK) +
603 		     (PI_ALIGN_K_DESC_BLK - 1);
604 	if (bp->kmalloced)
605 		dma_free_coherent(bdev, alloc_size,
606 				  bp->kmalloced, bp->kmalloced_dma);
607 
608 err_out_unmap:
609 	if (dfx_use_mmio)
610 		iounmap(bp->base.mem);
611 
612 err_out_region:
613 	if (dfx_use_mmio)
614 		release_mem_region(bar_start, bar_len);
615 	else
616 		release_region(bar_start, bar_len);
617 
618 err_out_disable:
619 	if (dfx_bus_pci)
620 		pci_disable_device(to_pci_dev(bdev));
621 
622 err_out:
623 	free_netdev(dev);
624 	return err;
625 }
626 
627 
628 /*
629  * ================
630  * = dfx_bus_init =
631  * ================
632  *
633  * Overview:
634  *   Initializes the bus-specific controller logic.
635  *
636  * Returns:
637  *   None
638  *
639  * Arguments:
640  *   dev - pointer to device information
641  *
642  * Functional Description:
643  *   Determine and save adapter IRQ in device table,
644  *   then perform bus-specific logic initialization.
645  *
646  * Return Codes:
647  *   None
648  *
649  * Assumptions:
650  *   bp->base has already been set with the proper
651  *	 base I/O address for this device.
652  *
653  * Side Effects:
654  *   Interrupts are enabled at the adapter bus-specific logic.
655  *   Note:  Interrupts at the DMA engine (PDQ chip) are not
656  *   enabled yet.
657  */
658 
659 static void dfx_bus_init(struct net_device *dev)
660 {
661 	DFX_board_t *bp = netdev_priv(dev);
662 	struct device *bdev = bp->bus_dev;
663 	int dfx_bus_pci = dev_is_pci(bdev);
664 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
665 	int dfx_bus_tc = DFX_BUS_TC(bdev);
666 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
667 	u8 val;
668 
669 	DBG_printk("In dfx_bus_init...\n");
670 
671 	/* Initialize a pointer back to the net_device struct */
672 	bp->dev = dev;
673 
674 	/* Initialize adapter based on bus type */
675 
676 	if (dfx_bus_tc)
677 		dev->irq = to_tc_dev(bdev)->interrupt;
678 	if (dfx_bus_eisa) {
679 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
680 
681 		/* Get the interrupt level from the ESIC chip.  */
682 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
683 		val &= PI_CONFIG_STAT_0_M_IRQ;
684 		val >>= PI_CONFIG_STAT_0_V_IRQ;
685 
686 		switch (val) {
687 		case PI_CONFIG_STAT_0_IRQ_K_9:
688 			dev->irq = 9;
689 			break;
690 
691 		case PI_CONFIG_STAT_0_IRQ_K_10:
692 			dev->irq = 10;
693 			break;
694 
695 		case PI_CONFIG_STAT_0_IRQ_K_11:
696 			dev->irq = 11;
697 			break;
698 
699 		case PI_CONFIG_STAT_0_IRQ_K_15:
700 			dev->irq = 15;
701 			break;
702 		}
703 
704 		/*
705 		 * Enable memory decoding (MEMCS0) and/or port decoding
706 		 * (IOCS1/IOCS0) as appropriate in Function Control
707 		 * Register.  One of the port chip selects seems to be
708 		 * used for the Burst Holdoff register, but this bit of
709 		 * documentation is missing and as yet it has not been
710 		 * determined which of the two.  This is also the reason
711 		 * the size of the decoded port range is twice as large
712 		 * as one required by the PDQ.
713 		 */
714 
715 		/* Set the decode range of the board.  */
716 		val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
717 		outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val);
718 		outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0);
719 		outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val);
720 		outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0);
721 		val = PI_ESIC_K_CSR_IO_LEN - 1;
722 		outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff);
723 		outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff);
724 		outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff);
725 		outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff);
726 
727 		/* Enable the decoders.  */
728 		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
729 		if (dfx_use_mmio)
730 			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
731 		outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val);
732 
733 		/*
734 		 * Enable access to the rest of the module
735 		 * (including PDQ and packet memory).
736 		 */
737 		val = PI_SLOT_CNTRL_M_ENB;
738 		outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val);
739 
740 		/*
741 		 * Map PDQ registers into memory or port space.  This is
742 		 * done with a bit in the Burst Holdoff register.
743 		 */
744 		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
745 		if (dfx_use_mmio)
746 			val |= PI_BURST_HOLDOFF_V_MEM_MAP;
747 		else
748 			val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
749 		outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val);
750 
751 		/* Enable interrupts at EISA bus interface chip (ESIC) */
752 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
753 		val |= PI_CONFIG_STAT_0_M_INT_ENB;
754 		outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
755 	}
756 	if (dfx_bus_pci) {
757 		struct pci_dev *pdev = to_pci_dev(bdev);
758 
759 		/* Get the interrupt level from the PCI Configuration Table */
760 
761 		dev->irq = pdev->irq;
762 
763 		/* Check Latency Timer and set if less than minimal */
764 
765 		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
766 		if (val < PFI_K_LAT_TIMER_MIN) {
767 			val = PFI_K_LAT_TIMER_DEF;
768 			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
769 		}
770 
771 		/* Enable interrupts at PCI bus interface chip (PFI) */
772 		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
773 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
774 	}
775 }
776 
777 /*
778  * ==================
779  * = dfx_bus_uninit =
780  * ==================
781  *
782  * Overview:
783  *   Uninitializes the bus-specific controller logic.
784  *
785  * Returns:
786  *   None
787  *
788  * Arguments:
789  *   dev - pointer to device information
790  *
791  * Functional Description:
792  *   Perform bus-specific logic uninitialization.
793  *
794  * Return Codes:
795  *   None
796  *
797  * Assumptions:
798  *   bp->base has already been set with the proper
799  *	 base I/O address for this device.
800  *
801  * Side Effects:
802  *   Interrupts are disabled at the adapter bus-specific logic.
803  */
804 
805 static void dfx_bus_uninit(struct net_device *dev)
806 {
807 	DFX_board_t *bp = netdev_priv(dev);
808 	struct device *bdev = bp->bus_dev;
809 	int dfx_bus_pci = dev_is_pci(bdev);
810 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
811 	u8 val;
812 
813 	DBG_printk("In dfx_bus_uninit...\n");
814 
815 	/* Uninitialize adapter based on bus type */
816 
817 	if (dfx_bus_eisa) {
818 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
819 
820 		/* Disable interrupts at EISA bus interface chip (ESIC) */
821 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
822 		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
823 		outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
824 	}
825 	if (dfx_bus_pci) {
826 		/* Disable interrupts at PCI bus interface chip (PFI) */
827 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
828 	}
829 }
830 
831 
832 /*
833  * ========================
834  * = dfx_bus_config_check =
835  * ========================
836  *
837  * Overview:
838  *   Checks the configuration (burst size, full-duplex, etc.)  If any parameters
839  *   are illegal, then this routine will set new defaults.
840  *
841  * Returns:
842  *   None
843  *
844  * Arguments:
845  *   bp - pointer to board information
846  *
847  * Functional Description:
848  *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
849  *   PDQ, and all FDDI PCI controllers, all values are legal.
850  *
851  * Return Codes:
852  *   None
853  *
854  * Assumptions:
855  *   dfx_adap_init has NOT been called yet so burst size and other items have
856  *   not been set.
857  *
858  * Side Effects:
859  *   None
860  */
861 
862 static void dfx_bus_config_check(DFX_board_t *bp)
863 {
864 	struct device __maybe_unused *bdev = bp->bus_dev;
865 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
866 	int	status;				/* return code from adapter port control call */
867 	u32	host_data;			/* LW data returned from port control call */
868 
869 	DBG_printk("In dfx_bus_config_check...\n");
870 
871 	/* Configuration check only valid for EISA adapter */
872 
873 	if (dfx_bus_eisa) {
874 		/*
875 		 * First check if revision 2 EISA controller.  Rev. 1 cards used
876 		 * PDQ revision B, so no workaround needed in this case.  Rev. 3
877 		 * cards used PDQ revision E, so no workaround needed in this
878 		 * case, either.  Only Rev. 2 cards used either Rev. D or E
879 		 * chips, so we must verify the chip revision on Rev. 2 cards.
880 		 */
881 		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
882 			/*
883 			 * Revision 2 FDDI EISA controller found,
884 			 * so let's check PDQ revision of adapter.
885 			 */
886 			status = dfx_hw_port_ctrl_req(bp,
887 											PI_PCTRL_M_SUB_CMD,
888 											PI_SUB_CMD_K_PDQ_REV_GET,
889 											0,
890 											&host_data);
891 			if ((status != DFX_K_SUCCESS) || (host_data == 2))
892 				{
893 				/*
894 				 * Either we couldn't determine the PDQ revision, or
895 				 * we determined that it is at revision D.  In either case,
896 				 * we need to implement the workaround.
897 				 */
898 
899 				/* Ensure that the burst size is set to 8 longwords or less */
900 
901 				switch (bp->burst_size)
902 					{
903 					case PI_PDATA_B_DMA_BURST_SIZE_32:
904 					case PI_PDATA_B_DMA_BURST_SIZE_16:
905 						bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
906 						break;
907 
908 					default:
909 						break;
910 					}
911 
912 				/* Ensure that full-duplex mode is not enabled */
913 
914 				bp->full_duplex_enb = PI_SNMP_K_FALSE;
915 				}
916 			}
917 		}
918 	}
919 
920 
921 /*
922  * ===================
923  * = dfx_driver_init =
924  * ===================
925  *
926  * Overview:
927  *   Initializes remaining adapter board structure information
928  *   and makes sure adapter is in a safe state prior to dfx_open().
929  *
930  * Returns:
931  *   Condition code
932  *
933  * Arguments:
934  *   dev - pointer to device information
935  *   print_name - printable device name
936  *
937  * Functional Description:
938  *   This function allocates additional resources such as the host memory
939  *   blocks needed by the adapter (eg. descriptor and consumer blocks).
940  *	 Remaining bus initialization steps are also completed.  The adapter
941  *   is also reset so that it is in the DMA_UNAVAILABLE state.  The OS
942  *   must call dfx_open() to open the adapter and bring it on-line.
943  *
944  * Return Codes:
945  *   DFX_K_SUCCESS	- initialization succeeded
946  *   DFX_K_FAILURE	- initialization failed - could not allocate memory
947  *						or read adapter MAC address
948  *
949  * Assumptions:
950  *   Memory allocated from pci_alloc_consistent() call is physically
951  *   contiguous, locked memory.
952  *
953  * Side Effects:
954  *   Adapter is reset and should be in DMA_UNAVAILABLE state before
955  *   returning from this routine.
956  */
957 
958 static int dfx_driver_init(struct net_device *dev, const char *print_name,
959 			   resource_size_t bar_start)
960 {
961 	DFX_board_t *bp = netdev_priv(dev);
962 	struct device *bdev = bp->bus_dev;
963 	int dfx_bus_pci = dev_is_pci(bdev);
964 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
965 	int dfx_bus_tc = DFX_BUS_TC(bdev);
966 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
967 	int alloc_size;			/* total buffer size needed */
968 	char *top_v, *curr_v;		/* virtual addrs into memory block */
969 	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
970 	u32 data;			/* host data register value */
971 	__le32 le32;
972 	char *board_name = NULL;
973 
974 	DBG_printk("In dfx_driver_init...\n");
975 
976 	/* Initialize bus-specific hardware registers */
977 
978 	dfx_bus_init(dev);
979 
980 	/*
981 	 * Initialize default values for configurable parameters
982 	 *
983 	 * Note: All of these parameters are ones that a user may
984 	 *       want to customize.  It'd be nice to break these
985 	 *		 out into Space.c or someplace else that's more
986 	 *		 accessible/understandable than this file.
987 	 */
988 
989 	bp->full_duplex_enb		= PI_SNMP_K_FALSE;
990 	bp->req_ttrt			= 8 * 12500;		/* 8ms in 80 nanosec units */
991 	bp->burst_size			= PI_PDATA_B_DMA_BURST_SIZE_DEF;
992 	bp->rcv_bufs_to_post	= RCV_BUFS_DEF;
993 
994 	/*
995 	 * Ensure that HW configuration is OK
996 	 *
997 	 * Note: Depending on the hardware revision, we may need to modify
998 	 *       some of the configurable parameters to workaround hardware
999 	 *       limitations.  We'll perform this configuration check AFTER
1000 	 *       setting the parameters to their default values.
1001 	 */
1002 
1003 	dfx_bus_config_check(bp);
1004 
1005 	/* Disable PDQ interrupts first */
1006 
1007 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1008 
1009 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1010 
1011 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1012 
1013 	/*  Read the factory MAC address from the adapter then save it */
1014 
1015 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1016 				 &data) != DFX_K_SUCCESS) {
1017 		printk("%s: Could not read adapter factory MAC address!\n",
1018 		       print_name);
1019 		return DFX_K_FAILURE;
1020 	}
1021 	le32 = cpu_to_le32(data);
1022 	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1023 
1024 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1025 				 &data) != DFX_K_SUCCESS) {
1026 		printk("%s: Could not read adapter factory MAC address!\n",
1027 		       print_name);
1028 		return DFX_K_FAILURE;
1029 	}
1030 	le32 = cpu_to_le32(data);
1031 	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1032 
1033 	/*
1034 	 * Set current address to factory address
1035 	 *
1036 	 * Note: Node address override support is handled through
1037 	 *       dfx_ctl_set_mac_address.
1038 	 */
1039 
1040 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1041 	if (dfx_bus_tc)
1042 		board_name = "DEFTA";
1043 	if (dfx_bus_eisa)
1044 		board_name = "DEFEA";
1045 	if (dfx_bus_pci)
1046 		board_name = "DEFPA";
1047 	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1048 		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1049 		(long long)bar_start, dev->irq, dev->dev_addr);
1050 
1051 	/*
1052 	 * Get memory for descriptor block, consumer block, and other buffers
1053 	 * that need to be DMA read or written to by the adapter.
1054 	 */
1055 
1056 	alloc_size = sizeof(PI_DESCR_BLOCK) +
1057 					PI_CMD_REQ_K_SIZE_MAX +
1058 					PI_CMD_RSP_K_SIZE_MAX +
1059 #ifndef DYNAMIC_BUFFERS
1060 					(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1061 #endif
1062 					sizeof(PI_CONSUMER_BLOCK) +
1063 					(PI_ALIGN_K_DESC_BLK - 1);
1064 	bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
1065 						    &bp->kmalloced_dma,
1066 						    GFP_ATOMIC);
1067 	if (top_v == NULL)
1068 		return DFX_K_FAILURE;
1069 
1070 	top_p = bp->kmalloced_dma;	/* get physical address of buffer */
1071 
1072 	/*
1073 	 *  To guarantee the 8K alignment required for the descriptor block, 8K - 1
1074 	 *  plus the amount of memory needed was allocated.  The physical address
1075 	 *	is now 8K aligned.  By carving up the memory in a specific order,
1076 	 *  we'll guarantee the alignment requirements for all other structures.
1077 	 *
1078 	 *  Note: If the assumptions change regarding the non-paged, non-cached,
1079 	 *		  physically contiguous nature of the memory block or the address
1080 	 *		  alignments, then we'll need to implement a different algorithm
1081 	 *		  for allocating the needed memory.
1082 	 */
1083 
1084 	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1085 	curr_v = top_v + (curr_p - top_p);
1086 
1087 	/* Reserve space for descriptor block */
1088 
1089 	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1090 	bp->descr_block_phys = curr_p;
1091 	curr_v += sizeof(PI_DESCR_BLOCK);
1092 	curr_p += sizeof(PI_DESCR_BLOCK);
1093 
1094 	/* Reserve space for command request buffer */
1095 
1096 	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1097 	bp->cmd_req_phys = curr_p;
1098 	curr_v += PI_CMD_REQ_K_SIZE_MAX;
1099 	curr_p += PI_CMD_REQ_K_SIZE_MAX;
1100 
1101 	/* Reserve space for command response buffer */
1102 
1103 	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1104 	bp->cmd_rsp_phys = curr_p;
1105 	curr_v += PI_CMD_RSP_K_SIZE_MAX;
1106 	curr_p += PI_CMD_RSP_K_SIZE_MAX;
1107 
1108 	/* Reserve space for the LLC host receive queue buffers */
1109 
1110 	bp->rcv_block_virt = curr_v;
1111 	bp->rcv_block_phys = curr_p;
1112 
1113 #ifndef DYNAMIC_BUFFERS
1114 	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1115 	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1116 #endif
1117 
1118 	/* Reserve space for the consumer block */
1119 
1120 	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1121 	bp->cons_block_phys = curr_p;
1122 
1123 	/* Display virtual and physical addresses if debug driver */
1124 
1125 	DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
1126 		   print_name,
1127 		   (long)bp->descr_block_virt, bp->descr_block_phys);
1128 	DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
1129 		   print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
1130 	DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
1131 		   print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
1132 	DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
1133 		   print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
1134 	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1135 		   print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1136 
1137 	return DFX_K_SUCCESS;
1138 }
1139 
1140 
1141 /*
1142  * =================
1143  * = dfx_adap_init =
1144  * =================
1145  *
1146  * Overview:
1147  *   Brings the adapter to the link avail/link unavailable state.
1148  *
1149  * Returns:
1150  *   Condition code
1151  *
1152  * Arguments:
1153  *   bp - pointer to board information
1154  *   get_buffers - non-zero if buffers to be allocated
1155  *
1156  * Functional Description:
1157  *   Issues the low-level firmware/hardware calls necessary to bring
1158  *   the adapter up, or to properly reset and restore adapter during
1159  *   run-time.
1160  *
1161  * Return Codes:
1162  *   DFX_K_SUCCESS - Adapter brought up successfully
1163  *   DFX_K_FAILURE - Adapter initialization failed
1164  *
1165  * Assumptions:
1166  *   bp->reset_type should be set to a valid reset type value before
1167  *   calling this routine.
1168  *
1169  * Side Effects:
1170  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1171  *   upon a successful return of this routine.
1172  */
1173 
1174 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1175 	{
1176 	DBG_printk("In dfx_adap_init...\n");
1177 
1178 	/* Disable PDQ interrupts first */
1179 
1180 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1181 
1182 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1183 
1184 	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1185 		{
1186 		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1187 		return DFX_K_FAILURE;
1188 		}
1189 
1190 	/*
1191 	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1192 	 * so we'll acknowledge all Type 0 interrupts now before continuing.
1193 	 */
1194 
1195 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1196 
1197 	/*
1198 	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1199 	 *
1200 	 * Note: We only need to clear host copies of these registers.  The PDQ reset
1201 	 *       takes care of the on-board register values.
1202 	 */
1203 
1204 	bp->cmd_req_reg.lword	= 0;
1205 	bp->cmd_rsp_reg.lword	= 0;
1206 	bp->rcv_xmt_reg.lword	= 0;
1207 
1208 	/* Clear consumer block before going to DMA_AVAILABLE state */
1209 
1210 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1211 
1212 	/* Initialize the DMA Burst Size */
1213 
1214 	if (dfx_hw_port_ctrl_req(bp,
1215 							PI_PCTRL_M_SUB_CMD,
1216 							PI_SUB_CMD_K_BURST_SIZE_SET,
1217 							bp->burst_size,
1218 							NULL) != DFX_K_SUCCESS)
1219 		{
1220 		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1221 		return DFX_K_FAILURE;
1222 		}
1223 
1224 	/*
1225 	 * Set base address of Consumer Block
1226 	 *
1227 	 * Assumption: 32-bit physical address of consumer block is 64 byte
1228 	 *			   aligned.  That is, bits 0-5 of the address must be zero.
1229 	 */
1230 
1231 	if (dfx_hw_port_ctrl_req(bp,
1232 							PI_PCTRL_M_CONS_BLOCK,
1233 							bp->cons_block_phys,
1234 							0,
1235 							NULL) != DFX_K_SUCCESS)
1236 		{
1237 		printk("%s: Could not set consumer block address!\n", bp->dev->name);
1238 		return DFX_K_FAILURE;
1239 		}
1240 
1241 	/*
1242 	 * Set the base address of Descriptor Block and bring adapter
1243 	 * to DMA_AVAILABLE state.
1244 	 *
1245 	 * Note: We also set the literal and data swapping requirements
1246 	 *       in this command.
1247 	 *
1248 	 * Assumption: 32-bit physical address of descriptor block
1249 	 *       is 8Kbyte aligned.
1250 	 */
1251 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1252 				 (u32)(bp->descr_block_phys |
1253 				       PI_PDATA_A_INIT_M_BSWAP_INIT),
1254 				 0, NULL) != DFX_K_SUCCESS) {
1255 		printk("%s: Could not set descriptor block address!\n",
1256 		       bp->dev->name);
1257 		return DFX_K_FAILURE;
1258 	}
1259 
1260 	/* Set transmit flush timeout value */
1261 
1262 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1263 	bp->cmd_req_virt->char_set.item[0].item_code	= PI_ITEM_K_FLUSH_TIME;
1264 	bp->cmd_req_virt->char_set.item[0].value		= 3;	/* 3 seconds */
1265 	bp->cmd_req_virt->char_set.item[0].item_index	= 0;
1266 	bp->cmd_req_virt->char_set.item[1].item_code	= PI_ITEM_K_EOL;
1267 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1268 		{
1269 		printk("%s: DMA command request failed!\n", bp->dev->name);
1270 		return DFX_K_FAILURE;
1271 		}
1272 
1273 	/* Set the initial values for eFDXEnable and MACTReq MIB objects */
1274 
1275 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1276 	bp->cmd_req_virt->snmp_set.item[0].item_code	= PI_ITEM_K_FDX_ENB_DIS;
1277 	bp->cmd_req_virt->snmp_set.item[0].value		= bp->full_duplex_enb;
1278 	bp->cmd_req_virt->snmp_set.item[0].item_index	= 0;
1279 	bp->cmd_req_virt->snmp_set.item[1].item_code	= PI_ITEM_K_MAC_T_REQ;
1280 	bp->cmd_req_virt->snmp_set.item[1].value		= bp->req_ttrt;
1281 	bp->cmd_req_virt->snmp_set.item[1].item_index	= 0;
1282 	bp->cmd_req_virt->snmp_set.item[2].item_code	= PI_ITEM_K_EOL;
1283 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1284 		{
1285 		printk("%s: DMA command request failed!\n", bp->dev->name);
1286 		return DFX_K_FAILURE;
1287 		}
1288 
1289 	/* Initialize adapter CAM */
1290 
1291 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1292 		{
1293 		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1294 		return DFX_K_FAILURE;
1295 		}
1296 
1297 	/* Initialize adapter filters */
1298 
1299 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1300 		{
1301 		printk("%s: Adapter filters update failed!\n", bp->dev->name);
1302 		return DFX_K_FAILURE;
1303 		}
1304 
1305 	/*
1306 	 * Remove any existing dynamic buffers (i.e. if the adapter is being
1307 	 * reinitialized)
1308 	 */
1309 
1310 	if (get_buffers)
1311 		dfx_rcv_flush(bp);
1312 
1313 	/* Initialize receive descriptor block and produce buffers */
1314 
1315 	if (dfx_rcv_init(bp, get_buffers))
1316 	        {
1317 		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1318 		if (get_buffers)
1319 			dfx_rcv_flush(bp);
1320 		return DFX_K_FAILURE;
1321 		}
1322 
1323 	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1324 
1325 	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1326 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1327 		{
1328 		printk("%s: Start command failed\n", bp->dev->name);
1329 		if (get_buffers)
1330 			dfx_rcv_flush(bp);
1331 		return DFX_K_FAILURE;
1332 		}
1333 
1334 	/* Initialization succeeded, reenable PDQ interrupts */
1335 
1336 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1337 	return DFX_K_SUCCESS;
1338 	}
1339 
1340 
1341 /*
1342  * ============
1343  * = dfx_open =
1344  * ============
1345  *
1346  * Overview:
1347  *   Opens the adapter
1348  *
1349  * Returns:
1350  *   Condition code
1351  *
1352  * Arguments:
1353  *   dev - pointer to device information
1354  *
1355  * Functional Description:
1356  *   This function brings the adapter to an operational state.
1357  *
1358  * Return Codes:
1359  *   0		 - Adapter was successfully opened
1360  *   -EAGAIN - Could not register IRQ or adapter initialization failed
1361  *
1362  * Assumptions:
1363  *   This routine should only be called for a device that was
1364  *   initialized successfully.
1365  *
1366  * Side Effects:
1367  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1368  *   if the open is successful.
1369  */
1370 
1371 static int dfx_open(struct net_device *dev)
1372 {
1373 	DFX_board_t *bp = netdev_priv(dev);
1374 	int ret;
1375 
1376 	DBG_printk("In dfx_open...\n");
1377 
1378 	/* Register IRQ - support shared interrupts by passing device ptr */
1379 
1380 	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1381 			  dev);
1382 	if (ret) {
1383 		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1384 		return ret;
1385 	}
1386 
1387 	/*
1388 	 * Set current address to factory MAC address
1389 	 *
1390 	 * Note: We've already done this step in dfx_driver_init.
1391 	 *       However, it's possible that a user has set a node
1392 	 *		 address override, then closed and reopened the
1393 	 *		 adapter.  Unless we reset the device address field
1394 	 *		 now, we'll continue to use the existing modified
1395 	 *		 address.
1396 	 */
1397 
1398 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1399 
1400 	/* Clear local unicast/multicast address tables and counts */
1401 
1402 	memset(bp->uc_table, 0, sizeof(bp->uc_table));
1403 	memset(bp->mc_table, 0, sizeof(bp->mc_table));
1404 	bp->uc_count = 0;
1405 	bp->mc_count = 0;
1406 
1407 	/* Disable promiscuous filter settings */
1408 
1409 	bp->ind_group_prom	= PI_FSTATE_K_BLOCK;
1410 	bp->group_prom		= PI_FSTATE_K_BLOCK;
1411 
1412 	spin_lock_init(&bp->lock);
1413 
1414 	/* Reset and initialize adapter */
1415 
1416 	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
1417 	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1418 	{
1419 		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1420 		free_irq(dev->irq, dev);
1421 		return -EAGAIN;
1422 	}
1423 
1424 	/* Set device structure info */
1425 	netif_start_queue(dev);
1426 	return 0;
1427 }
1428 
1429 
1430 /*
1431  * =============
1432  * = dfx_close =
1433  * =============
1434  *
1435  * Overview:
1436  *   Closes the device/module.
1437  *
1438  * Returns:
1439  *   Condition code
1440  *
1441  * Arguments:
1442  *   dev - pointer to device information
1443  *
1444  * Functional Description:
1445  *   This routine closes the adapter and brings it to a safe state.
1446  *   The interrupt service routine is deregistered with the OS.
1447  *   The adapter can be opened again with another call to dfx_open().
1448  *
1449  * Return Codes:
1450  *   Always return 0.
1451  *
1452  * Assumptions:
1453  *   No further requests for this adapter are made after this routine is
1454  *   called.  dfx_open() can be called to reset and reinitialize the
1455  *   adapter.
1456  *
1457  * Side Effects:
1458  *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
1459  *   routine.
1460  */
1461 
1462 static int dfx_close(struct net_device *dev)
1463 {
1464 	DFX_board_t *bp = netdev_priv(dev);
1465 
1466 	DBG_printk("In dfx_close...\n");
1467 
1468 	/* Disable PDQ interrupts first */
1469 
1470 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1471 
1472 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1473 
1474 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1475 
1476 	/*
1477 	 * Flush any pending transmit buffers
1478 	 *
1479 	 * Note: It's important that we flush the transmit buffers
1480 	 *		 BEFORE we clear our copy of the Type 2 register.
1481 	 *		 Otherwise, we'll have no idea how many buffers
1482 	 *		 we need to free.
1483 	 */
1484 
1485 	dfx_xmt_flush(bp);
1486 
1487 	/*
1488 	 * Clear Type 1 and Type 2 registers after adapter reset
1489 	 *
1490 	 * Note: Even though we're closing the adapter, it's
1491 	 *       possible that an interrupt will occur after
1492 	 *		 dfx_close is called.  Without some assurance to
1493 	 *		 the contrary we want to make sure that we don't
1494 	 *		 process receive and transmit LLC frames and update
1495 	 *		 the Type 2 register with bad information.
1496 	 */
1497 
1498 	bp->cmd_req_reg.lword	= 0;
1499 	bp->cmd_rsp_reg.lword	= 0;
1500 	bp->rcv_xmt_reg.lword	= 0;
1501 
1502 	/* Clear consumer block for the same reason given above */
1503 
1504 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1505 
1506 	/* Release all dynamically allocate skb in the receive ring. */
1507 
1508 	dfx_rcv_flush(bp);
1509 
1510 	/* Clear device structure flags */
1511 
1512 	netif_stop_queue(dev);
1513 
1514 	/* Deregister (free) IRQ */
1515 
1516 	free_irq(dev->irq, dev);
1517 
1518 	return 0;
1519 }
1520 
1521 
1522 /*
1523  * ======================
1524  * = dfx_int_pr_halt_id =
1525  * ======================
1526  *
1527  * Overview:
1528  *   Displays halt id's in string form.
1529  *
1530  * Returns:
1531  *   None
1532  *
1533  * Arguments:
1534  *   bp - pointer to board information
1535  *
1536  * Functional Description:
1537  *   Determine current halt id and display appropriate string.
1538  *
1539  * Return Codes:
1540  *   None
1541  *
1542  * Assumptions:
1543  *   None
1544  *
1545  * Side Effects:
1546  *   None
1547  */
1548 
1549 static void dfx_int_pr_halt_id(DFX_board_t	*bp)
1550 	{
1551 	PI_UINT32	port_status;			/* PDQ port status register value */
1552 	PI_UINT32	halt_id;				/* PDQ port status halt ID */
1553 
1554 	/* Read the latest port status */
1555 
1556 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1557 
1558 	/* Display halt state transition information */
1559 
1560 	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1561 	switch (halt_id)
1562 		{
1563 		case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1564 			printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1565 			break;
1566 
1567 		case PI_HALT_ID_K_PARITY_ERROR:
1568 			printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1569 			break;
1570 
1571 		case PI_HALT_ID_K_HOST_DIR_HALT:
1572 			printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1573 			break;
1574 
1575 		case PI_HALT_ID_K_SW_FAULT:
1576 			printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1577 			break;
1578 
1579 		case PI_HALT_ID_K_HW_FAULT:
1580 			printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1581 			break;
1582 
1583 		case PI_HALT_ID_K_PC_TRACE:
1584 			printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1585 			break;
1586 
1587 		case PI_HALT_ID_K_DMA_ERROR:
1588 			printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1589 			break;
1590 
1591 		case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1592 			printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1593 			break;
1594 
1595 		case PI_HALT_ID_K_BUS_EXCEPTION:
1596 			printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1597 			break;
1598 
1599 		default:
1600 			printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1601 			break;
1602 		}
1603 	}
1604 
1605 
1606 /*
1607  * ==========================
1608  * = dfx_int_type_0_process =
1609  * ==========================
1610  *
1611  * Overview:
1612  *   Processes Type 0 interrupts.
1613  *
1614  * Returns:
1615  *   None
1616  *
1617  * Arguments:
1618  *   bp - pointer to board information
1619  *
1620  * Functional Description:
1621  *   Processes all enabled Type 0 interrupts.  If the reason for the interrupt
1622  *   is a serious fault on the adapter, then an error message is displayed
1623  *   and the adapter is reset.
1624  *
1625  *   One tricky potential timing window is the rapid succession of "link avail"
1626  *   "link unavail" state change interrupts.  The acknowledgement of the Type 0
1627  *   interrupt must be done before reading the state from the Port Status
1628  *   register.  This is true because a state change could occur after reading
1629  *   the data, but before acknowledging the interrupt.  If this state change
1630  *   does happen, it would be lost because the driver is using the old state,
1631  *   and it will never know about the new state because it subsequently
1632  *   acknowledges the state change interrupt.
1633  *
1634  *          INCORRECT                                      CORRECT
1635  *      read type 0 int reasons                   read type 0 int reasons
1636  *      read adapter state                        ack type 0 interrupts
1637  *      ack type 0 interrupts                     read adapter state
1638  *      ... process interrupt ...                 ... process interrupt ...
1639  *
1640  * Return Codes:
1641  *   None
1642  *
1643  * Assumptions:
1644  *   None
1645  *
1646  * Side Effects:
1647  *   An adapter reset may occur if the adapter has any Type 0 error interrupts
1648  *   or if the port status indicates that the adapter is halted.  The driver
1649  *   is responsible for reinitializing the adapter with the current CAM
1650  *   contents and adapter filter settings.
1651  */
1652 
1653 static void dfx_int_type_0_process(DFX_board_t	*bp)
1654 
1655 	{
1656 	PI_UINT32	type_0_status;		/* Host Interrupt Type 0 register */
1657 	PI_UINT32	state;				/* current adap state (from port status) */
1658 
1659 	/*
1660 	 * Read host interrupt Type 0 register to determine which Type 0
1661 	 * interrupts are pending.  Immediately write it back out to clear
1662 	 * those interrupts.
1663 	 */
1664 
1665 	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1666 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1667 
1668 	/* Check for Type 0 error interrupts */
1669 
1670 	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1671 							PI_TYPE_0_STAT_M_PM_PAR_ERR |
1672 							PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1673 		{
1674 		/* Check for Non-Existent Memory error */
1675 
1676 		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1677 			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1678 
1679 		/* Check for Packet Memory Parity error */
1680 
1681 		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1682 			printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1683 
1684 		/* Check for Host Bus Parity error */
1685 
1686 		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1687 			printk("%s: Host Bus Parity Error\n", bp->dev->name);
1688 
1689 		/* Reset adapter and bring it back on-line */
1690 
1691 		bp->link_available = PI_K_FALSE;	/* link is no longer available */
1692 		bp->reset_type = 0;					/* rerun on-board diagnostics */
1693 		printk("%s: Resetting adapter...\n", bp->dev->name);
1694 		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1695 			{
1696 			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1697 			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1698 			return;
1699 			}
1700 		printk("%s: Adapter reset successful!\n", bp->dev->name);
1701 		return;
1702 		}
1703 
1704 	/* Check for transmit flush interrupt */
1705 
1706 	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1707 		{
1708 		/* Flush any pending xmt's and acknowledge the flush interrupt */
1709 
1710 		bp->link_available = PI_K_FALSE;		/* link is no longer available */
1711 		dfx_xmt_flush(bp);						/* flush any outstanding packets */
1712 		(void) dfx_hw_port_ctrl_req(bp,
1713 									PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1714 									0,
1715 									0,
1716 									NULL);
1717 		}
1718 
1719 	/* Check for adapter state change */
1720 
1721 	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1722 		{
1723 		/* Get latest adapter state */
1724 
1725 		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
1726 		if (state == PI_STATE_K_HALTED)
1727 			{
1728 			/*
1729 			 * Adapter has transitioned to HALTED state, try to reset
1730 			 * adapter to bring it back on-line.  If reset fails,
1731 			 * leave the adapter in the broken state.
1732 			 */
1733 
1734 			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1735 			dfx_int_pr_halt_id(bp);			/* display halt id as string */
1736 
1737 			/* Reset adapter and bring it back on-line */
1738 
1739 			bp->link_available = PI_K_FALSE;	/* link is no longer available */
1740 			bp->reset_type = 0;					/* rerun on-board diagnostics */
1741 			printk("%s: Resetting adapter...\n", bp->dev->name);
1742 			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1743 				{
1744 				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1745 				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1746 				return;
1747 				}
1748 			printk("%s: Adapter reset successful!\n", bp->dev->name);
1749 			}
1750 		else if (state == PI_STATE_K_LINK_AVAIL)
1751 			{
1752 			bp->link_available = PI_K_TRUE;		/* set link available flag */
1753 			}
1754 		}
1755 	}
1756 
1757 
1758 /*
1759  * ==================
1760  * = dfx_int_common =
1761  * ==================
1762  *
1763  * Overview:
1764  *   Interrupt service routine (ISR)
1765  *
1766  * Returns:
1767  *   None
1768  *
1769  * Arguments:
1770  *   bp - pointer to board information
1771  *
1772  * Functional Description:
1773  *   This is the ISR which processes incoming adapter interrupts.
1774  *
1775  * Return Codes:
1776  *   None
1777  *
1778  * Assumptions:
1779  *   This routine assumes PDQ interrupts have not been disabled.
1780  *   When interrupts are disabled at the PDQ, the Port Status register
1781  *   is automatically cleared.  This routine uses the Port Status
1782  *   register value to determine whether a Type 0 interrupt occurred,
1783  *   so it's important that adapter interrupts are not normally
1784  *   enabled/disabled at the PDQ.
1785  *
1786  *   It's vital that this routine is NOT reentered for the
1787  *   same board and that the OS is not in another section of
1788  *   code (eg. dfx_xmt_queue_pkt) for the same board on a
1789  *   different thread.
1790  *
1791  * Side Effects:
1792  *   Pending interrupts are serviced.  Depending on the type of
1793  *   interrupt, acknowledging and clearing the interrupt at the
1794  *   PDQ involves writing a register to clear the interrupt bit
1795  *   or updating completion indices.
1796  */
1797 
1798 static void dfx_int_common(struct net_device *dev)
1799 {
1800 	DFX_board_t *bp = netdev_priv(dev);
1801 	PI_UINT32	port_status;		/* Port Status register */
1802 
1803 	/* Process xmt interrupts - frequent case, so always call this routine */
1804 
1805 	if(dfx_xmt_done(bp))				/* free consumed xmt packets */
1806 		netif_wake_queue(dev);
1807 
1808 	/* Process rcv interrupts - frequent case, so always call this routine */
1809 
1810 	dfx_rcv_queue_process(bp);		/* service received LLC frames */
1811 
1812 	/*
1813 	 * Transmit and receive producer and completion indices are updated on the
1814 	 * adapter by writing to the Type 2 Producer register.  Since the frequent
1815 	 * case is that we'll be processing either LLC transmit or receive buffers,
1816 	 * we'll optimize I/O writes by doing a single register write here.
1817 	 */
1818 
1819 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1820 
1821 	/* Read PDQ Port Status register to find out which interrupts need processing */
1822 
1823 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1824 
1825 	/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1826 
1827 	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1828 		dfx_int_type_0_process(bp);	/* process Type 0 interrupts */
1829 	}
1830 
1831 
1832 /*
1833  * =================
1834  * = dfx_interrupt =
1835  * =================
1836  *
1837  * Overview:
1838  *   Interrupt processing routine
1839  *
1840  * Returns:
1841  *   Whether a valid interrupt was seen.
1842  *
1843  * Arguments:
1844  *   irq	- interrupt vector
1845  *   dev_id	- pointer to device information
1846  *
1847  * Functional Description:
1848  *   This routine calls the interrupt processing routine for this adapter.  It
1849  *   disables and reenables adapter interrupts, as appropriate.  We can support
1850  *   shared interrupts since the incoming dev_id pointer provides our device
1851  *   structure context.
1852  *
1853  * Return Codes:
1854  *   IRQ_HANDLED - an IRQ was handled.
1855  *   IRQ_NONE    - no IRQ was handled.
1856  *
1857  * Assumptions:
1858  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1859  *   on Intel-based systems) is done by the operating system outside this
1860  *   routine.
1861  *
1862  *	 System interrupts are enabled through this call.
1863  *
1864  * Side Effects:
1865  *   Interrupts are disabled, then reenabled at the adapter.
1866  */
1867 
1868 static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1869 {
1870 	struct net_device *dev = dev_id;
1871 	DFX_board_t *bp = netdev_priv(dev);
1872 	struct device *bdev = bp->bus_dev;
1873 	int dfx_bus_pci = dev_is_pci(bdev);
1874 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1875 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1876 
1877 	/* Service adapter interrupts */
1878 
1879 	if (dfx_bus_pci) {
1880 		u32 status;
1881 
1882 		dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1883 		if (!(status & PFI_STATUS_M_PDQ_INT))
1884 			return IRQ_NONE;
1885 
1886 		spin_lock(&bp->lock);
1887 
1888 		/* Disable PDQ-PFI interrupts at PFI */
1889 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1890 				    PFI_MODE_M_DMA_ENB);
1891 
1892 		/* Call interrupt service routine for this adapter */
1893 		dfx_int_common(dev);
1894 
1895 		/* Clear PDQ interrupt status bit and reenable interrupts */
1896 		dfx_port_write_long(bp, PFI_K_REG_STATUS,
1897 				    PFI_STATUS_M_PDQ_INT);
1898 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1899 				    (PFI_MODE_M_PDQ_INT_ENB |
1900 				     PFI_MODE_M_DMA_ENB));
1901 
1902 		spin_unlock(&bp->lock);
1903 	}
1904 	if (dfx_bus_eisa) {
1905 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1906 		u8 status;
1907 
1908 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1909 		if (!(status & PI_CONFIG_STAT_0_M_PEND))
1910 			return IRQ_NONE;
1911 
1912 		spin_lock(&bp->lock);
1913 
1914 		/* Disable interrupts at the ESIC */
1915 		status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1916 		outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1917 
1918 		/* Call interrupt service routine for this adapter */
1919 		dfx_int_common(dev);
1920 
1921 		/* Reenable interrupts at the ESIC */
1922 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1923 		status |= PI_CONFIG_STAT_0_M_INT_ENB;
1924 		outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status);
1925 
1926 		spin_unlock(&bp->lock);
1927 	}
1928 	if (dfx_bus_tc) {
1929 		u32 status;
1930 
1931 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1932 		if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1933 				PI_PSTATUS_M_XMT_DATA_PENDING |
1934 				PI_PSTATUS_M_SMT_HOST_PENDING |
1935 				PI_PSTATUS_M_UNSOL_PENDING |
1936 				PI_PSTATUS_M_CMD_RSP_PENDING |
1937 				PI_PSTATUS_M_CMD_REQ_PENDING |
1938 				PI_PSTATUS_M_TYPE_0_PENDING)))
1939 			return IRQ_NONE;
1940 
1941 		spin_lock(&bp->lock);
1942 
1943 		/* Call interrupt service routine for this adapter */
1944 		dfx_int_common(dev);
1945 
1946 		spin_unlock(&bp->lock);
1947 	}
1948 
1949 	return IRQ_HANDLED;
1950 }
1951 
1952 
1953 /*
1954  * =====================
1955  * = dfx_ctl_get_stats =
1956  * =====================
1957  *
1958  * Overview:
1959  *   Get statistics for FDDI adapter
1960  *
1961  * Returns:
1962  *   Pointer to FDDI statistics structure
1963  *
1964  * Arguments:
1965  *   dev - pointer to device information
1966  *
1967  * Functional Description:
1968  *   Gets current MIB objects from adapter, then
1969  *   returns FDDI statistics structure as defined
1970  *   in if_fddi.h.
1971  *
1972  *   Note: Since the FDDI statistics structure is
1973  *   still new and the device structure doesn't
1974  *   have an FDDI-specific get statistics handler,
1975  *   we'll return the FDDI statistics structure as
1976  *   a pointer to an Ethernet statistics structure.
1977  *   That way, at least the first part of the statistics
1978  *   structure can be decoded properly, and it allows
1979  *   "smart" applications to perform a second cast to
1980  *   decode the FDDI-specific statistics.
1981  *
1982  *   We'll have to pay attention to this routine as the
1983  *   device structure becomes more mature and LAN media
1984  *   independent.
1985  *
1986  * Return Codes:
1987  *   None
1988  *
1989  * Assumptions:
1990  *   None
1991  *
1992  * Side Effects:
1993  *   None
1994  */
1995 
1996 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
1997 	{
1998 	DFX_board_t *bp = netdev_priv(dev);
1999 
2000 	/* Fill the bp->stats structure with driver-maintained counters */
2001 
2002 	bp->stats.gen.rx_packets = bp->rcv_total_frames;
2003 	bp->stats.gen.tx_packets = bp->xmt_total_frames;
2004 	bp->stats.gen.rx_bytes   = bp->rcv_total_bytes;
2005 	bp->stats.gen.tx_bytes   = bp->xmt_total_bytes;
2006 	bp->stats.gen.rx_errors  = bp->rcv_crc_errors +
2007 				   bp->rcv_frame_status_errors +
2008 				   bp->rcv_length_errors;
2009 	bp->stats.gen.tx_errors  = bp->xmt_length_errors;
2010 	bp->stats.gen.rx_dropped = bp->rcv_discards;
2011 	bp->stats.gen.tx_dropped = bp->xmt_discards;
2012 	bp->stats.gen.multicast  = bp->rcv_multicast_frames;
2013 	bp->stats.gen.collisions = 0;		/* always zero (0) for FDDI */
2014 
2015 	/* Get FDDI SMT MIB objects */
2016 
2017 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2018 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2019 		return (struct net_device_stats *)&bp->stats;
2020 
2021 	/* Fill the bp->stats structure with the SMT MIB object values */
2022 
2023 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2024 	bp->stats.smt_op_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2025 	bp->stats.smt_hi_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2026 	bp->stats.smt_lo_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2027 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2028 	bp->stats.smt_mib_version_id				= bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2029 	bp->stats.smt_mac_cts						= bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2030 	bp->stats.smt_non_master_cts				= bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2031 	bp->stats.smt_master_cts					= bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2032 	bp->stats.smt_available_paths				= bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2033 	bp->stats.smt_config_capabilities			= bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2034 	bp->stats.smt_config_policy					= bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2035 	bp->stats.smt_connection_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2036 	bp->stats.smt_t_notify						= bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2037 	bp->stats.smt_stat_rpt_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2038 	bp->stats.smt_trace_max_expiration			= bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2039 	bp->stats.smt_bypass_present				= bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2040 	bp->stats.smt_ecm_state						= bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2041 	bp->stats.smt_cf_state						= bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2042 	bp->stats.smt_remote_disconnect_flag		= bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2043 	bp->stats.smt_station_status				= bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2044 	bp->stats.smt_peer_wrap_flag				= bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2045 	bp->stats.smt_time_stamp					= bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2046 	bp->stats.smt_transition_time_stamp			= bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2047 	bp->stats.mac_frame_status_functions		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2048 	bp->stats.mac_t_max_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2049 	bp->stats.mac_tvx_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2050 	bp->stats.mac_available_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2051 	bp->stats.mac_current_path					= bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2052 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2053 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2054 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2055 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2056 	bp->stats.mac_dup_address_test				= bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2057 	bp->stats.mac_requested_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2058 	bp->stats.mac_downstream_port_type			= bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2059 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2060 	bp->stats.mac_t_req							= bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2061 	bp->stats.mac_t_neg							= bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2062 	bp->stats.mac_t_max							= bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2063 	bp->stats.mac_tvx_value						= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2064 	bp->stats.mac_frame_error_threshold			= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2065 	bp->stats.mac_frame_error_ratio				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2066 	bp->stats.mac_rmt_state						= bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2067 	bp->stats.mac_da_flag						= bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2068 	bp->stats.mac_una_da_flag					= bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2069 	bp->stats.mac_frame_error_flag				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2070 	bp->stats.mac_ma_unitdata_available			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2071 	bp->stats.mac_hardware_present				= bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2072 	bp->stats.mac_ma_unitdata_enable			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2073 	bp->stats.path_tvx_lower_bound				= bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2074 	bp->stats.path_t_max_lower_bound			= bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2075 	bp->stats.path_max_t_req					= bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2076 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2077 	bp->stats.port_my_type[0]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2078 	bp->stats.port_my_type[1]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2079 	bp->stats.port_neighbor_type[0]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2080 	bp->stats.port_neighbor_type[1]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2081 	bp->stats.port_connection_policies[0]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2082 	bp->stats.port_connection_policies[1]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2083 	bp->stats.port_mac_indicated[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2084 	bp->stats.port_mac_indicated[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2085 	bp->stats.port_current_path[0]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2086 	bp->stats.port_current_path[1]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2087 	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2088 	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2089 	bp->stats.port_mac_placement[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2090 	bp->stats.port_mac_placement[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2091 	bp->stats.port_available_paths[0]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2092 	bp->stats.port_available_paths[1]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2093 	bp->stats.port_pmd_class[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2094 	bp->stats.port_pmd_class[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2095 	bp->stats.port_connection_capabilities[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2096 	bp->stats.port_connection_capabilities[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2097 	bp->stats.port_bs_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2098 	bp->stats.port_bs_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2099 	bp->stats.port_ler_estimate[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2100 	bp->stats.port_ler_estimate[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2101 	bp->stats.port_ler_cutoff[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2102 	bp->stats.port_ler_cutoff[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2103 	bp->stats.port_ler_alarm[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2104 	bp->stats.port_ler_alarm[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2105 	bp->stats.port_connect_state[0]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2106 	bp->stats.port_connect_state[1]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2107 	bp->stats.port_pcm_state[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2108 	bp->stats.port_pcm_state[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2109 	bp->stats.port_pc_withhold[0]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2110 	bp->stats.port_pc_withhold[1]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2111 	bp->stats.port_ler_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2112 	bp->stats.port_ler_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2113 	bp->stats.port_hardware_present[0]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2114 	bp->stats.port_hardware_present[1]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2115 
2116 	/* Get FDDI counters */
2117 
2118 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2119 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2120 		return (struct net_device_stats *)&bp->stats;
2121 
2122 	/* Fill the bp->stats structure with the FDDI counter values */
2123 
2124 	bp->stats.mac_frame_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2125 	bp->stats.mac_copied_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2126 	bp->stats.mac_transmit_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2127 	bp->stats.mac_error_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2128 	bp->stats.mac_lost_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2129 	bp->stats.port_lct_fail_cts[0]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2130 	bp->stats.port_lct_fail_cts[1]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2131 	bp->stats.port_lem_reject_cts[0]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2132 	bp->stats.port_lem_reject_cts[1]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2133 	bp->stats.port_lem_cts[0]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2134 	bp->stats.port_lem_cts[1]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2135 
2136 	return (struct net_device_stats *)&bp->stats;
2137 	}
2138 
2139 
2140 /*
2141  * ==============================
2142  * = dfx_ctl_set_multicast_list =
2143  * ==============================
2144  *
2145  * Overview:
2146  *   Enable/Disable LLC frame promiscuous mode reception
2147  *   on the adapter and/or update multicast address table.
2148  *
2149  * Returns:
2150  *   None
2151  *
2152  * Arguments:
2153  *   dev - pointer to device information
2154  *
2155  * Functional Description:
2156  *   This routine follows a fairly simple algorithm for setting the
2157  *   adapter filters and CAM:
2158  *
2159  *		if IFF_PROMISC flag is set
2160  *			enable LLC individual/group promiscuous mode
2161  *		else
2162  *			disable LLC individual/group promiscuous mode
2163  *			if number of incoming multicast addresses >
2164  *					(CAM max size - number of unicast addresses in CAM)
2165  *				enable LLC group promiscuous mode
2166  *				set driver-maintained multicast address count to zero
2167  *			else
2168  *				disable LLC group promiscuous mode
2169  *				set driver-maintained multicast address count to incoming count
2170  *			update adapter CAM
2171  *		update adapter filters
2172  *
2173  * Return Codes:
2174  *   None
2175  *
2176  * Assumptions:
2177  *   Multicast addresses are presented in canonical (LSB) format.
2178  *
2179  * Side Effects:
2180  *   On-board adapter CAM and filters are updated.
2181  */
2182 
2183 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2184 {
2185 	DFX_board_t *bp = netdev_priv(dev);
2186 	int					i;			/* used as index in for loop */
2187 	struct netdev_hw_addr *ha;
2188 
2189 	/* Enable LLC frame promiscuous mode, if necessary */
2190 
2191 	if (dev->flags & IFF_PROMISC)
2192 		bp->ind_group_prom = PI_FSTATE_K_PASS;		/* Enable LLC ind/group prom mode */
2193 
2194 	/* Else, update multicast address table */
2195 
2196 	else
2197 		{
2198 		bp->ind_group_prom = PI_FSTATE_K_BLOCK;		/* Disable LLC ind/group prom mode */
2199 		/*
2200 		 * Check whether incoming multicast address count exceeds table size
2201 		 *
2202 		 * Note: The adapters utilize an on-board 64 entry CAM for
2203 		 *       supporting perfect filtering of multicast packets
2204 		 *		 and bridge functions when adding unicast addresses.
2205 		 *		 There is no hash function available.  To support
2206 		 *		 additional multicast addresses, the all multicast
2207 		 *		 filter (LLC group promiscuous mode) must be enabled.
2208 		 *
2209 		 *		 The firmware reserves two CAM entries for SMT-related
2210 		 *		 multicast addresses, which leaves 62 entries available.
2211 		 *		 The following code ensures that we're not being asked
2212 		 *		 to add more than 62 addresses to the CAM.  If we are,
2213 		 *		 the driver will enable the all multicast filter.
2214 		 *		 Should the number of multicast addresses drop below
2215 		 *		 the high water mark, the filter will be disabled and
2216 		 *		 perfect filtering will be used.
2217 		 */
2218 
2219 		if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2220 			{
2221 			bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2222 			bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2223 			}
2224 		else
2225 			{
2226 			bp->group_prom	= PI_FSTATE_K_BLOCK;	/* Disable LLC group prom mode */
2227 			bp->mc_count	= netdev_mc_count(dev);		/* Add mc addrs to CAM */
2228 			}
2229 
2230 		/* Copy addresses to multicast address table, then update adapter CAM */
2231 
2232 		i = 0;
2233 		netdev_for_each_mc_addr(ha, dev)
2234 			memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2235 			       ha->addr, FDDI_K_ALEN);
2236 
2237 		if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2238 			{
2239 			DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2240 			}
2241 		else
2242 			{
2243 			DBG_printk("%s: Multicast address table updated!  Added %d addresses.\n", dev->name, bp->mc_count);
2244 			}
2245 		}
2246 
2247 	/* Update adapter filters */
2248 
2249 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2250 		{
2251 		DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2252 		}
2253 	else
2254 		{
2255 		DBG_printk("%s: Adapter filters updated!\n", dev->name);
2256 		}
2257 	}
2258 
2259 
2260 /*
2261  * ===========================
2262  * = dfx_ctl_set_mac_address =
2263  * ===========================
2264  *
2265  * Overview:
2266  *   Add node address override (unicast address) to adapter
2267  *   CAM and update dev_addr field in device table.
2268  *
2269  * Returns:
2270  *   None
2271  *
2272  * Arguments:
2273  *   dev  - pointer to device information
2274  *   addr - pointer to sockaddr structure containing unicast address to add
2275  *
2276  * Functional Description:
2277  *   The adapter supports node address overrides by adding one or more
2278  *   unicast addresses to the adapter CAM.  This is similar to adding
2279  *   multicast addresses.  In this routine we'll update the driver and
2280  *   device structures with the new address, then update the adapter CAM
2281  *   to ensure that the adapter will copy and strip frames destined and
2282  *   sourced by that address.
2283  *
2284  * Return Codes:
2285  *   Always returns zero.
2286  *
2287  * Assumptions:
2288  *   The address pointed to by addr->sa_data is a valid unicast
2289  *   address and is presented in canonical (LSB) format.
2290  *
2291  * Side Effects:
2292  *   On-board adapter CAM is updated.  On-board adapter filters
2293  *   may be updated.
2294  */
2295 
2296 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2297 	{
2298 	struct sockaddr	*p_sockaddr = (struct sockaddr *)addr;
2299 	DFX_board_t *bp = netdev_priv(dev);
2300 
2301 	/* Copy unicast address to driver-maintained structs and update count */
2302 
2303 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);	/* update device struct */
2304 	memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);	/* update driver struct */
2305 	bp->uc_count = 1;
2306 
2307 	/*
2308 	 * Verify we're not exceeding the CAM size by adding unicast address
2309 	 *
2310 	 * Note: It's possible that before entering this routine we've
2311 	 *       already filled the CAM with 62 multicast addresses.
2312 	 *		 Since we need to place the node address override into
2313 	 *		 the CAM, we have to check to see that we're not
2314 	 *		 exceeding the CAM size.  If we are, we have to enable
2315 	 *		 the LLC group (multicast) promiscuous mode filter as
2316 	 *		 in dfx_ctl_set_multicast_list.
2317 	 */
2318 
2319 	if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2320 		{
2321 		bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2322 		bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2323 
2324 		/* Update adapter filters */
2325 
2326 		if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2327 			{
2328 			DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2329 			}
2330 		else
2331 			{
2332 			DBG_printk("%s: Adapter filters updated!\n", dev->name);
2333 			}
2334 		}
2335 
2336 	/* Update adapter CAM with new unicast address */
2337 
2338 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2339 		{
2340 		DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2341 		}
2342 	else
2343 		{
2344 		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2345 		}
2346 	return 0;			/* always return zero */
2347 	}
2348 
2349 
2350 /*
2351  * ======================
2352  * = dfx_ctl_update_cam =
2353  * ======================
2354  *
2355  * Overview:
2356  *   Procedure to update adapter CAM (Content Addressable Memory)
2357  *   with desired unicast and multicast address entries.
2358  *
2359  * Returns:
2360  *   Condition code
2361  *
2362  * Arguments:
2363  *   bp - pointer to board information
2364  *
2365  * Functional Description:
2366  *   Updates adapter CAM with current contents of board structure
2367  *   unicast and multicast address tables.  Since there are only 62
2368  *   free entries in CAM, this routine ensures that the command
2369  *   request buffer is not overrun.
2370  *
2371  * Return Codes:
2372  *   DFX_K_SUCCESS - Request succeeded
2373  *   DFX_K_FAILURE - Request failed
2374  *
2375  * Assumptions:
2376  *   All addresses being added (unicast and multicast) are in canonical
2377  *   order.
2378  *
2379  * Side Effects:
2380  *   On-board adapter CAM is updated.
2381  */
2382 
2383 static int dfx_ctl_update_cam(DFX_board_t *bp)
2384 	{
2385 	int			i;				/* used as index */
2386 	PI_LAN_ADDR	*p_addr;		/* pointer to CAM entry */
2387 
2388 	/*
2389 	 * Fill in command request information
2390 	 *
2391 	 * Note: Even though both the unicast and multicast address
2392 	 *       table entries are stored as contiguous 6 byte entries,
2393 	 *		 the firmware address filter set command expects each
2394 	 *		 entry to be two longwords (8 bytes total).  We must be
2395 	 *		 careful to only copy the six bytes of each unicast and
2396 	 *		 multicast table entry into each command entry.  This
2397 	 *		 is also why we must first clear the entire command
2398 	 *		 request buffer.
2399 	 */
2400 
2401 	memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);	/* first clear buffer */
2402 	bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2403 	p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2404 
2405 	/* Now add unicast addresses to command request buffer, if any */
2406 
2407 	for (i=0; i < (int)bp->uc_count; i++)
2408 		{
2409 		if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2410 			{
2411 			memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2412 			p_addr++;			/* point to next command entry */
2413 			}
2414 		}
2415 
2416 	/* Now add multicast addresses to command request buffer, if any */
2417 
2418 	for (i=0; i < (int)bp->mc_count; i++)
2419 		{
2420 		if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2421 			{
2422 			memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2423 			p_addr++;			/* point to next command entry */
2424 			}
2425 		}
2426 
2427 	/* Issue command to update adapter CAM, then return */
2428 
2429 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2430 		return DFX_K_FAILURE;
2431 	return DFX_K_SUCCESS;
2432 	}
2433 
2434 
2435 /*
2436  * ==========================
2437  * = dfx_ctl_update_filters =
2438  * ==========================
2439  *
2440  * Overview:
2441  *   Procedure to update adapter filters with desired
2442  *   filter settings.
2443  *
2444  * Returns:
2445  *   Condition code
2446  *
2447  * Arguments:
2448  *   bp - pointer to board information
2449  *
2450  * Functional Description:
2451  *   Enables or disables filter using current filter settings.
2452  *
2453  * Return Codes:
2454  *   DFX_K_SUCCESS - Request succeeded.
2455  *   DFX_K_FAILURE - Request failed.
2456  *
2457  * Assumptions:
2458  *   We must always pass up packets destined to the broadcast
2459  *   address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2460  *   broadcast filter enabled.
2461  *
2462  * Side Effects:
2463  *   On-board adapter filters are updated.
2464  */
2465 
2466 static int dfx_ctl_update_filters(DFX_board_t *bp)
2467 	{
2468 	int	i = 0;					/* used as index */
2469 
2470 	/* Fill in command request information */
2471 
2472 	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2473 
2474 	/* Initialize Broadcast filter - * ALWAYS ENABLED * */
2475 
2476 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_BROADCAST;
2477 	bp->cmd_req_virt->filter_set.item[i++].value	= PI_FSTATE_K_PASS;
2478 
2479 	/* Initialize LLC Individual/Group Promiscuous filter */
2480 
2481 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_IND_GROUP_PROM;
2482 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->ind_group_prom;
2483 
2484 	/* Initialize LLC Group Promiscuous filter */
2485 
2486 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_GROUP_PROM;
2487 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->group_prom;
2488 
2489 	/* Terminate the item code list */
2490 
2491 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_EOL;
2492 
2493 	/* Issue command to update adapter filters, then return */
2494 
2495 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2496 		return DFX_K_FAILURE;
2497 	return DFX_K_SUCCESS;
2498 	}
2499 
2500 
2501 /*
2502  * ======================
2503  * = dfx_hw_dma_cmd_req =
2504  * ======================
2505  *
2506  * Overview:
2507  *   Sends PDQ DMA command to adapter firmware
2508  *
2509  * Returns:
2510  *   Condition code
2511  *
2512  * Arguments:
2513  *   bp - pointer to board information
2514  *
2515  * Functional Description:
2516  *   The command request and response buffers are posted to the adapter in the manner
2517  *   described in the PDQ Port Specification:
2518  *
2519  *		1. Command Response Buffer is posted to adapter.
2520  *		2. Command Request Buffer is posted to adapter.
2521  *		3. Command Request consumer index is polled until it indicates that request
2522  *         buffer has been DMA'd to adapter.
2523  *		4. Command Response consumer index is polled until it indicates that response
2524  *         buffer has been DMA'd from adapter.
2525  *
2526  *   This ordering ensures that a response buffer is already available for the firmware
2527  *   to use once it's done processing the request buffer.
2528  *
2529  * Return Codes:
2530  *   DFX_K_SUCCESS	  - DMA command succeeded
2531  * 	 DFX_K_OUTSTATE   - Adapter is NOT in proper state
2532  *   DFX_K_HW_TIMEOUT - DMA command timed out
2533  *
2534  * Assumptions:
2535  *   Command request buffer has already been filled with desired DMA command.
2536  *
2537  * Side Effects:
2538  *   None
2539  */
2540 
2541 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2542 	{
2543 	int status;			/* adapter status */
2544 	int timeout_cnt;	/* used in for loops */
2545 
2546 	/* Make sure the adapter is in a state that we can issue the DMA command in */
2547 
2548 	status = dfx_hw_adap_state_rd(bp);
2549 	if ((status == PI_STATE_K_RESET)		||
2550 		(status == PI_STATE_K_HALTED)		||
2551 		(status == PI_STATE_K_DMA_UNAVAIL)	||
2552 		(status == PI_STATE_K_UPGRADE))
2553 		return DFX_K_OUTSTATE;
2554 
2555 	/* Put response buffer on the command response queue */
2556 
2557 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2558 			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2559 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2560 
2561 	/* Bump (and wrap) the producer index and write out to register */
2562 
2563 	bp->cmd_rsp_reg.index.prod += 1;
2564 	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2565 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2566 
2567 	/* Put request buffer on the command request queue */
2568 
2569 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2570 			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2571 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2572 
2573 	/* Bump (and wrap) the producer index and write out to register */
2574 
2575 	bp->cmd_req_reg.index.prod += 1;
2576 	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2577 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2578 
2579 	/*
2580 	 * Here we wait for the command request consumer index to be equal
2581 	 * to the producer, indicating that the adapter has DMAed the request.
2582 	 */
2583 
2584 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2585 		{
2586 		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2587 			break;
2588 		udelay(100);			/* wait for 100 microseconds */
2589 		}
2590 	if (timeout_cnt == 0)
2591 		return DFX_K_HW_TIMEOUT;
2592 
2593 	/* Bump (and wrap) the completion index and write out to register */
2594 
2595 	bp->cmd_req_reg.index.comp += 1;
2596 	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2597 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2598 
2599 	/*
2600 	 * Here we wait for the command response consumer index to be equal
2601 	 * to the producer, indicating that the adapter has DMAed the response.
2602 	 */
2603 
2604 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2605 		{
2606 		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2607 			break;
2608 		udelay(100);			/* wait for 100 microseconds */
2609 		}
2610 	if (timeout_cnt == 0)
2611 		return DFX_K_HW_TIMEOUT;
2612 
2613 	/* Bump (and wrap) the completion index and write out to register */
2614 
2615 	bp->cmd_rsp_reg.index.comp += 1;
2616 	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2617 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2618 	return DFX_K_SUCCESS;
2619 	}
2620 
2621 
2622 /*
2623  * ========================
2624  * = dfx_hw_port_ctrl_req =
2625  * ========================
2626  *
2627  * Overview:
2628  *   Sends PDQ port control command to adapter firmware
2629  *
2630  * Returns:
2631  *   Host data register value in host_data if ptr is not NULL
2632  *
2633  * Arguments:
2634  *   bp			- pointer to board information
2635  *	 command	- port control command
2636  *	 data_a		- port data A register value
2637  *	 data_b		- port data B register value
2638  *	 host_data	- ptr to host data register value
2639  *
2640  * Functional Description:
2641  *   Send generic port control command to adapter by writing
2642  *   to various PDQ port registers, then polling for completion.
2643  *
2644  * Return Codes:
2645  *   DFX_K_SUCCESS	  - port control command succeeded
2646  *   DFX_K_HW_TIMEOUT - port control command timed out
2647  *
2648  * Assumptions:
2649  *   None
2650  *
2651  * Side Effects:
2652  *   None
2653  */
2654 
2655 static int dfx_hw_port_ctrl_req(
2656 	DFX_board_t	*bp,
2657 	PI_UINT32	command,
2658 	PI_UINT32	data_a,
2659 	PI_UINT32	data_b,
2660 	PI_UINT32	*host_data
2661 	)
2662 
2663 	{
2664 	PI_UINT32	port_cmd;		/* Port Control command register value */
2665 	int			timeout_cnt;	/* used in for loops */
2666 
2667 	/* Set Command Error bit in command longword */
2668 
2669 	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2670 
2671 	/* Issue port command to the adapter */
2672 
2673 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2674 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2675 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2676 
2677 	/* Now wait for command to complete */
2678 
2679 	if (command == PI_PCTRL_M_BLAST_FLASH)
2680 		timeout_cnt = 600000;	/* set command timeout count to 60 seconds */
2681 	else
2682 		timeout_cnt = 20000;	/* set command timeout count to 2 seconds */
2683 
2684 	for (; timeout_cnt > 0; timeout_cnt--)
2685 		{
2686 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2687 		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2688 			break;
2689 		udelay(100);			/* wait for 100 microseconds */
2690 		}
2691 	if (timeout_cnt == 0)
2692 		return DFX_K_HW_TIMEOUT;
2693 
2694 	/*
2695 	 * If the address of host_data is non-zero, assume caller has supplied a
2696 	 * non NULL pointer, and return the contents of the HOST_DATA register in
2697 	 * it.
2698 	 */
2699 
2700 	if (host_data != NULL)
2701 		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2702 	return DFX_K_SUCCESS;
2703 	}
2704 
2705 
2706 /*
2707  * =====================
2708  * = dfx_hw_adap_reset =
2709  * =====================
2710  *
2711  * Overview:
2712  *   Resets adapter
2713  *
2714  * Returns:
2715  *   None
2716  *
2717  * Arguments:
2718  *   bp   - pointer to board information
2719  *   type - type of reset to perform
2720  *
2721  * Functional Description:
2722  *   Issue soft reset to adapter by writing to PDQ Port Reset
2723  *   register.  Use incoming reset type to tell adapter what
2724  *   kind of reset operation to perform.
2725  *
2726  * Return Codes:
2727  *   None
2728  *
2729  * Assumptions:
2730  *   This routine merely issues a soft reset to the adapter.
2731  *   It is expected that after this routine returns, the caller
2732  *   will appropriately poll the Port Status register for the
2733  *   adapter to enter the proper state.
2734  *
2735  * Side Effects:
2736  *   Internal adapter registers are cleared.
2737  */
2738 
2739 static void dfx_hw_adap_reset(
2740 	DFX_board_t	*bp,
2741 	PI_UINT32	type
2742 	)
2743 
2744 	{
2745 	/* Set Reset type and assert reset */
2746 
2747 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);	/* tell adapter type of reset */
2748 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2749 
2750 	/* Wait for at least 1 Microsecond according to the spec. We wait 20 just to be safe */
2751 
2752 	udelay(20);
2753 
2754 	/* Deassert reset */
2755 
2756 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2757 	}
2758 
2759 
2760 /*
2761  * ========================
2762  * = dfx_hw_adap_state_rd =
2763  * ========================
2764  *
2765  * Overview:
2766  *   Returns current adapter state
2767  *
2768  * Returns:
2769  *   Adapter state per PDQ Port Specification
2770  *
2771  * Arguments:
2772  *   bp - pointer to board information
2773  *
2774  * Functional Description:
2775  *   Reads PDQ Port Status register and returns adapter state.
2776  *
2777  * Return Codes:
2778  *   None
2779  *
2780  * Assumptions:
2781  *   None
2782  *
2783  * Side Effects:
2784  *   None
2785  */
2786 
2787 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2788 	{
2789 	PI_UINT32 port_status;		/* Port Status register value */
2790 
2791 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2792 	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2793 	}
2794 
2795 
2796 /*
2797  * =====================
2798  * = dfx_hw_dma_uninit =
2799  * =====================
2800  *
2801  * Overview:
2802  *   Brings adapter to DMA_UNAVAILABLE state
2803  *
2804  * Returns:
2805  *   Condition code
2806  *
2807  * Arguments:
2808  *   bp   - pointer to board information
2809  *   type - type of reset to perform
2810  *
2811  * Functional Description:
2812  *   Bring adapter to DMA_UNAVAILABLE state by performing the following:
2813  *		1. Set reset type bit in Port Data A Register then reset adapter.
2814  *		2. Check that adapter is in DMA_UNAVAILABLE state.
2815  *
2816  * Return Codes:
2817  *   DFX_K_SUCCESS	  - adapter is in DMA_UNAVAILABLE state
2818  *   DFX_K_HW_TIMEOUT - adapter did not reset properly
2819  *
2820  * Assumptions:
2821  *   None
2822  *
2823  * Side Effects:
2824  *   Internal adapter registers are cleared.
2825  */
2826 
2827 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2828 	{
2829 	int timeout_cnt;	/* used in for loops */
2830 
2831 	/* Set reset type bit and reset adapter */
2832 
2833 	dfx_hw_adap_reset(bp, type);
2834 
2835 	/* Now wait for adapter to enter DMA_UNAVAILABLE state */
2836 
2837 	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2838 		{
2839 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2840 			break;
2841 		udelay(100);					/* wait for 100 microseconds */
2842 		}
2843 	if (timeout_cnt == 0)
2844 		return DFX_K_HW_TIMEOUT;
2845 	return DFX_K_SUCCESS;
2846 	}
2847 
2848 /*
2849  *	Align an sk_buff to a boundary power of 2
2850  *
2851  */
2852 
2853 static void my_skb_align(struct sk_buff *skb, int n)
2854 {
2855 	unsigned long x = (unsigned long)skb->data;
2856 	unsigned long v;
2857 
2858 	v = ALIGN(x, n);	/* Where we want to be */
2859 
2860 	skb_reserve(skb, v - x);
2861 }
2862 
2863 
2864 /*
2865  * ================
2866  * = dfx_rcv_init =
2867  * ================
2868  *
2869  * Overview:
2870  *   Produces buffers to adapter LLC Host receive descriptor block
2871  *
2872  * Returns:
2873  *   None
2874  *
2875  * Arguments:
2876  *   bp - pointer to board information
2877  *   get_buffers - non-zero if buffers to be allocated
2878  *
2879  * Functional Description:
2880  *   This routine can be called during dfx_adap_init() or during an adapter
2881  *	 reset.  It initializes the descriptor block and produces all allocated
2882  *   LLC Host queue receive buffers.
2883  *
2884  * Return Codes:
2885  *   Return 0 on success or -ENOMEM if buffer allocation failed (when using
2886  *   dynamic buffer allocation). If the buffer allocation failed, the
2887  *   already allocated buffers will not be released and the caller should do
2888  *   this.
2889  *
2890  * Assumptions:
2891  *   The PDQ has been reset and the adapter and driver maintained Type 2
2892  *   register indices are cleared.
2893  *
2894  * Side Effects:
2895  *   Receive buffers are posted to the adapter LLC queue and the adapter
2896  *   is notified.
2897  */
2898 
2899 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2900 	{
2901 	int	i, j;					/* used in for loop */
2902 
2903 	/*
2904 	 *  Since each receive buffer is a single fragment of same length, initialize
2905 	 *  first longword in each receive descriptor for entire LLC Host descriptor
2906 	 *  block.  Also initialize second longword in each receive descriptor with
2907 	 *  physical address of receive buffer.  We'll always allocate receive
2908 	 *  buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2909 	 *  block and produce new receive buffers by simply updating the receive
2910 	 *  producer index.
2911 	 *
2912 	 * 	Assumptions:
2913 	 *		To support all shipping versions of PDQ, the receive buffer size
2914 	 *		must be mod 128 in length and the physical address must be 128 byte
2915 	 *		aligned.  In other words, bits 0-6 of the length and address must
2916 	 *		be zero for the following descriptor field entries to be correct on
2917 	 *		all PDQ-based boards.  We guaranteed both requirements during
2918 	 *		driver initialization when we allocated memory for the receive buffers.
2919 	 */
2920 
2921 	if (get_buffers) {
2922 #ifdef DYNAMIC_BUFFERS
2923 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2924 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2925 		{
2926 			struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2927 			if (!newskb)
2928 				return -ENOMEM;
2929 			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2930 				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2931 			/*
2932 			 * align to 128 bytes for compatibility with
2933 			 * the old EISA boards.
2934 			 */
2935 
2936 			my_skb_align(newskb, 128);
2937 			bp->descr_block_virt->rcv_data[i + j].long_1 =
2938 				(u32)dma_map_single(bp->bus_dev, newskb->data,
2939 						    NEW_SKB_SIZE,
2940 						    DMA_FROM_DEVICE);
2941 			/*
2942 			 * p_rcv_buff_va is only used inside the
2943 			 * kernel so we put the skb pointer here.
2944 			 */
2945 			bp->p_rcv_buff_va[i+j] = (char *) newskb;
2946 		}
2947 #else
2948 	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2949 		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2950 			{
2951 			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2952 				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2953 			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2954 			bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2955 			}
2956 #endif
2957 	}
2958 
2959 	/* Update receive producer and Type 2 register */
2960 
2961 	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2962 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2963 	return 0;
2964 	}
2965 
2966 
2967 /*
2968  * =========================
2969  * = dfx_rcv_queue_process =
2970  * =========================
2971  *
2972  * Overview:
2973  *   Process received LLC frames.
2974  *
2975  * Returns:
2976  *   None
2977  *
2978  * Arguments:
2979  *   bp - pointer to board information
2980  *
2981  * Functional Description:
2982  *   Received LLC frames are processed until there are no more consumed frames.
2983  *   Once all frames are processed, the receive buffers are returned to the
2984  *   adapter.  Note that this algorithm fixes the length of time that can be spent
2985  *   in this routine, because there are a fixed number of receive buffers to
2986  *   process and buffers are not produced until this routine exits and returns
2987  *   to the ISR.
2988  *
2989  * Return Codes:
2990  *   None
2991  *
2992  * Assumptions:
2993  *   None
2994  *
2995  * Side Effects:
2996  *   None
2997  */
2998 
2999 static void dfx_rcv_queue_process(
3000 	DFX_board_t *bp
3001 	)
3002 
3003 	{
3004 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3005 	char				*p_buff;			/* ptr to start of packet receive buffer (FMC descriptor) */
3006 	u32					descr, pkt_len;		/* FMC descriptor field and packet length */
3007 	struct sk_buff		*skb;				/* pointer to a sk_buff to hold incoming packet data */
3008 
3009 	/* Service all consumed LLC receive frames */
3010 
3011 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3012 	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3013 		{
3014 		/* Process any errors */
3015 
3016 		int entry;
3017 
3018 		entry = bp->rcv_xmt_reg.index.rcv_comp;
3019 #ifdef DYNAMIC_BUFFERS
3020 		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3021 #else
3022 		p_buff = bp->p_rcv_buff_va[entry];
3023 #endif
3024 		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3025 
3026 		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3027 			{
3028 			if (descr & PI_FMC_DESCR_M_RCC_CRC)
3029 				bp->rcv_crc_errors++;
3030 			else
3031 				bp->rcv_frame_status_errors++;
3032 			}
3033 		else
3034 		{
3035 			int rx_in_place = 0;
3036 
3037 			/* The frame was received without errors - verify packet length */
3038 
3039 			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3040 			pkt_len -= 4;				/* subtract 4 byte CRC */
3041 			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3042 				bp->rcv_length_errors++;
3043 			else{
3044 #ifdef DYNAMIC_BUFFERS
3045 				if (pkt_len > SKBUFF_RX_COPYBREAK) {
3046 					struct sk_buff *newskb;
3047 
3048 					newskb = dev_alloc_skb(NEW_SKB_SIZE);
3049 					if (newskb){
3050 						rx_in_place = 1;
3051 
3052 						my_skb_align(newskb, 128);
3053 						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3054 						dma_unmap_single(bp->bus_dev,
3055 							bp->descr_block_virt->rcv_data[entry].long_1,
3056 							NEW_SKB_SIZE,
3057 							DMA_FROM_DEVICE);
3058 						skb_reserve(skb, RCV_BUFF_K_PADDING);
3059 						bp->p_rcv_buff_va[entry] = (char *)newskb;
3060 						bp->descr_block_virt->rcv_data[entry].long_1 =
3061 							(u32)dma_map_single(bp->bus_dev,
3062 								newskb->data,
3063 								NEW_SKB_SIZE,
3064 								DMA_FROM_DEVICE);
3065 					} else
3066 						skb = NULL;
3067 				} else
3068 #endif
3069 					skb = dev_alloc_skb(pkt_len+3);	/* alloc new buffer to pass up, add room for PRH */
3070 				if (skb == NULL)
3071 					{
3072 					printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
3073 					bp->rcv_discards++;
3074 					break;
3075 					}
3076 				else {
3077 #ifndef DYNAMIC_BUFFERS
3078 					if (! rx_in_place)
3079 #endif
3080 					{
3081 						/* Receive buffer allocated, pass receive packet up */
3082 
3083 						skb_copy_to_linear_data(skb,
3084 							       p_buff + RCV_BUFF_K_PADDING,
3085 							       pkt_len + 3);
3086 					}
3087 
3088 					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
3089 					skb_put(skb, pkt_len);		/* pass up packet length, NOT including CRC */
3090 					skb->protocol = fddi_type_trans(skb, bp->dev);
3091 					bp->rcv_total_bytes += skb->len;
3092 					netif_rx(skb);
3093 
3094 					/* Update the rcv counters */
3095 					bp->rcv_total_frames++;
3096 					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3097 						bp->rcv_multicast_frames++;
3098 				}
3099 			}
3100 			}
3101 
3102 		/*
3103 		 * Advance the producer (for recycling) and advance the completion
3104 		 * (for servicing received frames).  Note that it is okay to
3105 		 * advance the producer without checking that it passes the
3106 		 * completion index because they are both advanced at the same
3107 		 * rate.
3108 		 */
3109 
3110 		bp->rcv_xmt_reg.index.rcv_prod += 1;
3111 		bp->rcv_xmt_reg.index.rcv_comp += 1;
3112 		}
3113 	}
3114 
3115 
3116 /*
3117  * =====================
3118  * = dfx_xmt_queue_pkt =
3119  * =====================
3120  *
3121  * Overview:
3122  *   Queues packets for transmission
3123  *
3124  * Returns:
3125  *   Condition code
3126  *
3127  * Arguments:
3128  *   skb - pointer to sk_buff to queue for transmission
3129  *   dev - pointer to device information
3130  *
3131  * Functional Description:
3132  *   Here we assume that an incoming skb transmit request
3133  *   is contained in a single physically contiguous buffer
3134  *   in which the virtual address of the start of packet
3135  *   (skb->data) can be converted to a physical address
3136  *   by using pci_map_single().
3137  *
3138  *   Since the adapter architecture requires a three byte
3139  *   packet request header to prepend the start of packet,
3140  *   we'll write the three byte field immediately prior to
3141  *   the FC byte.  This assumption is valid because we've
3142  *   ensured that dev->hard_header_len includes three pad
3143  *   bytes.  By posting a single fragment to the adapter,
3144  *   we'll reduce the number of descriptor fetches and
3145  *   bus traffic needed to send the request.
3146  *
3147  *   Also, we can't free the skb until after it's been DMA'd
3148  *   out by the adapter, so we'll queue it in the driver and
3149  *   return it in dfx_xmt_done.
3150  *
3151  * Return Codes:
3152  *   0 - driver queued packet, link is unavailable, or skbuff was bad
3153  *	 1 - caller should requeue the sk_buff for later transmission
3154  *
3155  * Assumptions:
3156  *	 First and foremost, we assume the incoming skb pointer
3157  *   is NOT NULL and is pointing to a valid sk_buff structure.
3158  *
3159  *   The outgoing packet is complete, starting with the
3160  *   frame control byte including the last byte of data,
3161  *   but NOT including the 4 byte CRC.  We'll let the
3162  *   adapter hardware generate and append the CRC.
3163  *
3164  *   The entire packet is stored in one physically
3165  *   contiguous buffer which is not cached and whose
3166  *   32-bit physical address can be determined.
3167  *
3168  *   It's vital that this routine is NOT reentered for the
3169  *   same board and that the OS is not in another section of
3170  *   code (eg. dfx_int_common) for the same board on a
3171  *   different thread.
3172  *
3173  * Side Effects:
3174  *   None
3175  */
3176 
3177 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3178 				     struct net_device *dev)
3179 	{
3180 	DFX_board_t		*bp = netdev_priv(dev);
3181 	u8			prod;				/* local transmit producer index */
3182 	PI_XMT_DESCR		*p_xmt_descr;		/* ptr to transmit descriptor block entry */
3183 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3184 	unsigned long		flags;
3185 
3186 	netif_stop_queue(dev);
3187 
3188 	/*
3189 	 * Verify that incoming transmit request is OK
3190 	 *
3191 	 * Note: The packet size check is consistent with other
3192 	 *		 Linux device drivers, although the correct packet
3193 	 *		 size should be verified before calling the
3194 	 *		 transmit routine.
3195 	 */
3196 
3197 	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3198 	{
3199 		printk("%s: Invalid packet length - %u bytes\n",
3200 			dev->name, skb->len);
3201 		bp->xmt_length_errors++;		/* bump error counter */
3202 		netif_wake_queue(dev);
3203 		dev_kfree_skb(skb);
3204 		return NETDEV_TX_OK;			/* return "success" */
3205 	}
3206 	/*
3207 	 * See if adapter link is available, if not, free buffer
3208 	 *
3209 	 * Note: If the link isn't available, free buffer and return 0
3210 	 *		 rather than tell the upper layer to requeue the packet.
3211 	 *		 The methodology here is that by the time the link
3212 	 *		 becomes available, the packet to be sent will be
3213 	 *		 fairly stale.  By simply dropping the packet, the
3214 	 *		 higher layer protocols will eventually time out
3215 	 *		 waiting for response packets which it won't receive.
3216 	 */
3217 
3218 	if (bp->link_available == PI_K_FALSE)
3219 		{
3220 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)	/* is link really available? */
3221 			bp->link_available = PI_K_TRUE;		/* if so, set flag and continue */
3222 		else
3223 			{
3224 			bp->xmt_discards++;					/* bump error counter */
3225 			dev_kfree_skb(skb);		/* free sk_buff now */
3226 			netif_wake_queue(dev);
3227 			return NETDEV_TX_OK;		/* return "success" */
3228 			}
3229 		}
3230 
3231 	spin_lock_irqsave(&bp->lock, flags);
3232 
3233 	/* Get the current producer and the next free xmt data descriptor */
3234 
3235 	prod		= bp->rcv_xmt_reg.index.xmt_prod;
3236 	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3237 
3238 	/*
3239 	 * Get pointer to auxiliary queue entry to contain information
3240 	 * for this packet.
3241 	 *
3242 	 * Note: The current xmt producer index will become the
3243 	 *	 current xmt completion index when we complete this
3244 	 *	 packet later on.  So, we'll get the pointer to the
3245 	 *	 next auxiliary queue entry now before we bump the
3246 	 *	 producer index.
3247 	 */
3248 
3249 	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);	/* also bump producer index */
3250 
3251 	/* Write the three PRH bytes immediately before the FC byte */
3252 
3253 	skb_push(skb,3);
3254 	skb->data[0] = DFX_PRH0_BYTE;	/* these byte values are defined */
3255 	skb->data[1] = DFX_PRH1_BYTE;	/* in the Motorola FDDI MAC chip */
3256 	skb->data[2] = DFX_PRH2_BYTE;	/* specification */
3257 
3258 	/*
3259 	 * Write the descriptor with buffer info and bump producer
3260 	 *
3261 	 * Note: Since we need to start DMA from the packet request
3262 	 *		 header, we'll add 3 bytes to the DMA buffer length,
3263 	 *		 and we'll determine the physical address of the
3264 	 *		 buffer from the PRH, not skb->data.
3265 	 *
3266 	 * Assumptions:
3267 	 *		 1. Packet starts with the frame control (FC) byte
3268 	 *		    at skb->data.
3269 	 *		 2. The 4-byte CRC is not appended to the buffer or
3270 	 *			included in the length.
3271 	 *		 3. Packet length (skb->len) is from FC to end of
3272 	 *			data, inclusive.
3273 	 *		 4. The packet length does not exceed the maximum
3274 	 *			FDDI LLC frame length of 4491 bytes.
3275 	 *		 5. The entire packet is contained in a physically
3276 	 *			contiguous, non-cached, locked memory space
3277 	 *			comprised of a single buffer pointed to by
3278 	 *			skb->data.
3279 	 *		 6. The physical address of the start of packet
3280 	 *			can be determined from the virtual address
3281 	 *			by using pci_map_single() and is only 32-bits
3282 	 *			wide.
3283 	 */
3284 
3285 	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3286 	p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
3287 						  skb->len, DMA_TO_DEVICE);
3288 
3289 	/*
3290 	 * Verify that descriptor is actually available
3291 	 *
3292 	 * Note: If descriptor isn't available, return 1 which tells
3293 	 *	 the upper layer to requeue the packet for later
3294 	 *	 transmission.
3295 	 *
3296 	 *       We need to ensure that the producer never reaches the
3297 	 *	 completion, except to indicate that the queue is empty.
3298 	 */
3299 
3300 	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3301 	{
3302 		skb_pull(skb,3);
3303 		spin_unlock_irqrestore(&bp->lock, flags);
3304 		return NETDEV_TX_BUSY;	/* requeue packet for later */
3305 	}
3306 
3307 	/*
3308 	 * Save info for this packet for xmt done indication routine
3309 	 *
3310 	 * Normally, we'd save the producer index in the p_xmt_drv_descr
3311 	 * structure so that we'd have it handy when we complete this
3312 	 * packet later (in dfx_xmt_done).  However, since the current
3313 	 * transmit architecture guarantees a single fragment for the
3314 	 * entire packet, we can simply bump the completion index by
3315 	 * one (1) for each completed packet.
3316 	 *
3317 	 * Note: If this assumption changes and we're presented with
3318 	 *	 an inconsistent number of transmit fragments for packet
3319 	 *	 data, we'll need to modify this code to save the current
3320 	 *	 transmit producer index.
3321 	 */
3322 
3323 	p_xmt_drv_descr->p_skb = skb;
3324 
3325 	/* Update Type 2 register */
3326 
3327 	bp->rcv_xmt_reg.index.xmt_prod = prod;
3328 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3329 	spin_unlock_irqrestore(&bp->lock, flags);
3330 	netif_wake_queue(dev);
3331 	return NETDEV_TX_OK;	/* packet queued to adapter */
3332 	}
3333 
3334 
3335 /*
3336  * ================
3337  * = dfx_xmt_done =
3338  * ================
3339  *
3340  * Overview:
3341  *   Processes all frames that have been transmitted.
3342  *
3343  * Returns:
3344  *   None
3345  *
3346  * Arguments:
3347  *   bp - pointer to board information
3348  *
3349  * Functional Description:
3350  *   For all consumed transmit descriptors that have not
3351  *   yet been completed, we'll free the skb we were holding
3352  *   onto using dev_kfree_skb and bump the appropriate
3353  *   counters.
3354  *
3355  * Return Codes:
3356  *   None
3357  *
3358  * Assumptions:
3359  *   The Type 2 register is not updated in this routine.  It is
3360  *   assumed that it will be updated in the ISR when dfx_xmt_done
3361  *   returns.
3362  *
3363  * Side Effects:
3364  *   None
3365  */
3366 
3367 static int dfx_xmt_done(DFX_board_t *bp)
3368 	{
3369 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3370 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3371 	u8			comp;			/* local transmit completion index */
3372 	int 			freed = 0;		/* buffers freed */
3373 
3374 	/* Service all consumed transmit frames */
3375 
3376 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3377 	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3378 		{
3379 		/* Get pointer to the transmit driver descriptor block information */
3380 
3381 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3382 
3383 		/* Increment transmit counters */
3384 
3385 		bp->xmt_total_frames++;
3386 		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3387 
3388 		/* Return skb to operating system */
3389 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3390 		dma_unmap_single(bp->bus_dev,
3391 				 bp->descr_block_virt->xmt_data[comp].long_1,
3392 				 p_xmt_drv_descr->p_skb->len,
3393 				 DMA_TO_DEVICE);
3394 		dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3395 
3396 		/*
3397 		 * Move to start of next packet by updating completion index
3398 		 *
3399 		 * Here we assume that a transmit packet request is always
3400 		 * serviced by posting one fragment.  We can therefore
3401 		 * simplify the completion code by incrementing the
3402 		 * completion index by one.  This code will need to be
3403 		 * modified if this assumption changes.  See comments
3404 		 * in dfx_xmt_queue_pkt for more details.
3405 		 */
3406 
3407 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3408 		freed++;
3409 		}
3410 	return freed;
3411 	}
3412 
3413 
3414 /*
3415  * =================
3416  * = dfx_rcv_flush =
3417  * =================
3418  *
3419  * Overview:
3420  *   Remove all skb's in the receive ring.
3421  *
3422  * Returns:
3423  *   None
3424  *
3425  * Arguments:
3426  *   bp - pointer to board information
3427  *
3428  * Functional Description:
3429  *   Free's all the dynamically allocated skb's that are
3430  *   currently attached to the device receive ring. This
3431  *   function is typically only used when the device is
3432  *   initialized or reinitialized.
3433  *
3434  * Return Codes:
3435  *   None
3436  *
3437  * Side Effects:
3438  *   None
3439  */
3440 #ifdef DYNAMIC_BUFFERS
3441 static void dfx_rcv_flush( DFX_board_t *bp )
3442 	{
3443 	int i, j;
3444 
3445 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3446 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3447 		{
3448 			struct sk_buff *skb;
3449 			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3450 			if (skb)
3451 				dev_kfree_skb(skb);
3452 			bp->p_rcv_buff_va[i+j] = NULL;
3453 		}
3454 
3455 	}
3456 #else
3457 static inline void dfx_rcv_flush( DFX_board_t *bp )
3458 {
3459 }
3460 #endif /* DYNAMIC_BUFFERS */
3461 
3462 /*
3463  * =================
3464  * = dfx_xmt_flush =
3465  * =================
3466  *
3467  * Overview:
3468  *   Processes all frames whether they've been transmitted
3469  *   or not.
3470  *
3471  * Returns:
3472  *   None
3473  *
3474  * Arguments:
3475  *   bp - pointer to board information
3476  *
3477  * Functional Description:
3478  *   For all produced transmit descriptors that have not
3479  *   yet been completed, we'll free the skb we were holding
3480  *   onto using dev_kfree_skb and bump the appropriate
3481  *   counters.  Of course, it's possible that some of
3482  *   these transmit requests actually did go out, but we
3483  *   won't make that distinction here.  Finally, we'll
3484  *   update the consumer index to match the producer.
3485  *
3486  * Return Codes:
3487  *   None
3488  *
3489  * Assumptions:
3490  *   This routine does NOT update the Type 2 register.  It
3491  *   is assumed that this routine is being called during a
3492  *   transmit flush interrupt, or a shutdown or close routine.
3493  *
3494  * Side Effects:
3495  *   None
3496  */
3497 
3498 static void dfx_xmt_flush( DFX_board_t *bp )
3499 	{
3500 	u32			prod_cons;		/* rcv/xmt consumer block longword */
3501 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3502 	u8			comp;			/* local transmit completion index */
3503 
3504 	/* Flush all outstanding transmit frames */
3505 
3506 	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3507 		{
3508 		/* Get pointer to the transmit driver descriptor block information */
3509 
3510 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3511 
3512 		/* Return skb to operating system */
3513 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3514 		dma_unmap_single(bp->bus_dev,
3515 				 bp->descr_block_virt->xmt_data[comp].long_1,
3516 				 p_xmt_drv_descr->p_skb->len,
3517 				 DMA_TO_DEVICE);
3518 		dev_kfree_skb(p_xmt_drv_descr->p_skb);
3519 
3520 		/* Increment transmit error counter */
3521 
3522 		bp->xmt_discards++;
3523 
3524 		/*
3525 		 * Move to start of next packet by updating completion index
3526 		 *
3527 		 * Here we assume that a transmit packet request is always
3528 		 * serviced by posting one fragment.  We can therefore
3529 		 * simplify the completion code by incrementing the
3530 		 * completion index by one.  This code will need to be
3531 		 * modified if this assumption changes.  See comments
3532 		 * in dfx_xmt_queue_pkt for more details.
3533 		 */
3534 
3535 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3536 		}
3537 
3538 	/* Update the transmit consumer index in the consumer block */
3539 
3540 	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3541 	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3542 	bp->cons_block_virt->xmt_rcv_data = prod_cons;
3543 	}
3544 
3545 /*
3546  * ==================
3547  * = dfx_unregister =
3548  * ==================
3549  *
3550  * Overview:
3551  *   Shuts down an FDDI controller
3552  *
3553  * Returns:
3554  *   Condition code
3555  *
3556  * Arguments:
3557  *   bdev - pointer to device information
3558  *
3559  * Functional Description:
3560  *
3561  * Return Codes:
3562  *   None
3563  *
3564  * Assumptions:
3565  *   It compiles so it should work :-( (PCI cards do :-)
3566  *
3567  * Side Effects:
3568  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
3569  *   freed.
3570  */
3571 static void dfx_unregister(struct device *bdev)
3572 {
3573 	struct net_device *dev = dev_get_drvdata(bdev);
3574 	DFX_board_t *bp = netdev_priv(dev);
3575 	int dfx_bus_pci = dev_is_pci(bdev);
3576 	int dfx_bus_tc = DFX_BUS_TC(bdev);
3577 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3578 	resource_size_t bar_start = 0;		/* pointer to port */
3579 	resource_size_t bar_len = 0;		/* resource length */
3580 	int		alloc_size;		/* total buffer size used */
3581 
3582 	unregister_netdev(dev);
3583 
3584 	alloc_size = sizeof(PI_DESCR_BLOCK) +
3585 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3586 #ifndef DYNAMIC_BUFFERS
3587 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3588 #endif
3589 		     sizeof(PI_CONSUMER_BLOCK) +
3590 		     (PI_ALIGN_K_DESC_BLK - 1);
3591 	if (bp->kmalloced)
3592 		dma_free_coherent(bdev, alloc_size,
3593 				  bp->kmalloced, bp->kmalloced_dma);
3594 
3595 	dfx_bus_uninit(dev);
3596 
3597 	dfx_get_bars(bdev, &bar_start, &bar_len);
3598 	if (dfx_use_mmio) {
3599 		iounmap(bp->base.mem);
3600 		release_mem_region(bar_start, bar_len);
3601 	} else
3602 		release_region(bar_start, bar_len);
3603 
3604 	if (dfx_bus_pci)
3605 		pci_disable_device(to_pci_dev(bdev));
3606 
3607 	free_netdev(dev);
3608 }
3609 
3610 
3611 static int __maybe_unused dfx_dev_register(struct device *);
3612 static int __maybe_unused dfx_dev_unregister(struct device *);
3613 
3614 #ifdef CONFIG_PCI
3615 static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3616 static void dfx_pci_unregister(struct pci_dev *);
3617 
3618 static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3619 	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3620 	{ }
3621 };
3622 MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3623 
3624 static struct pci_driver dfx_pci_driver = {
3625 	.name		= "defxx",
3626 	.id_table	= dfx_pci_table,
3627 	.probe		= dfx_pci_register,
3628 	.remove		= dfx_pci_unregister,
3629 };
3630 
3631 static int dfx_pci_register(struct pci_dev *pdev,
3632 			    const struct pci_device_id *ent)
3633 {
3634 	return dfx_register(&pdev->dev);
3635 }
3636 
3637 static void dfx_pci_unregister(struct pci_dev *pdev)
3638 {
3639 	dfx_unregister(&pdev->dev);
3640 }
3641 #endif /* CONFIG_PCI */
3642 
3643 #ifdef CONFIG_EISA
3644 static struct eisa_device_id dfx_eisa_table[] = {
3645         { "DEC3001", DEFEA_PROD_ID_1 },
3646         { "DEC3002", DEFEA_PROD_ID_2 },
3647         { "DEC3003", DEFEA_PROD_ID_3 },
3648         { "DEC3004", DEFEA_PROD_ID_4 },
3649         { }
3650 };
3651 MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3652 
3653 static struct eisa_driver dfx_eisa_driver = {
3654 	.id_table	= dfx_eisa_table,
3655 	.driver		= {
3656 		.name	= "defxx",
3657 		.bus	= &eisa_bus_type,
3658 		.probe	= dfx_dev_register,
3659 		.remove	= dfx_dev_unregister,
3660 	},
3661 };
3662 #endif /* CONFIG_EISA */
3663 
3664 #ifdef CONFIG_TC
3665 static struct tc_device_id const dfx_tc_table[] = {
3666 	{ "DEC     ", "PMAF-FA " },
3667 	{ "DEC     ", "PMAF-FD " },
3668 	{ "DEC     ", "PMAF-FS " },
3669 	{ "DEC     ", "PMAF-FU " },
3670 	{ }
3671 };
3672 MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3673 
3674 static struct tc_driver dfx_tc_driver = {
3675 	.id_table	= dfx_tc_table,
3676 	.driver		= {
3677 		.name	= "defxx",
3678 		.bus	= &tc_bus_type,
3679 		.probe	= dfx_dev_register,
3680 		.remove	= dfx_dev_unregister,
3681 	},
3682 };
3683 #endif /* CONFIG_TC */
3684 
3685 static int __maybe_unused dfx_dev_register(struct device *dev)
3686 {
3687 	int status;
3688 
3689 	status = dfx_register(dev);
3690 	if (!status)
3691 		get_device(dev);
3692 	return status;
3693 }
3694 
3695 static int __maybe_unused dfx_dev_unregister(struct device *dev)
3696 {
3697 	put_device(dev);
3698 	dfx_unregister(dev);
3699 	return 0;
3700 }
3701 
3702 
3703 static int dfx_init(void)
3704 {
3705 	int status;
3706 
3707 	status = pci_register_driver(&dfx_pci_driver);
3708 	if (!status)
3709 		status = eisa_driver_register(&dfx_eisa_driver);
3710 	if (!status)
3711 		status = tc_register_driver(&dfx_tc_driver);
3712 	return status;
3713 }
3714 
3715 static void dfx_cleanup(void)
3716 {
3717 	tc_unregister_driver(&dfx_tc_driver);
3718 	eisa_driver_unregister(&dfx_eisa_driver);
3719 	pci_unregister_driver(&dfx_pci_driver);
3720 }
3721 
3722 module_init(dfx_init);
3723 module_exit(dfx_cleanup);
3724 MODULE_AUTHOR("Lawrence V. Stefani");
3725 MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3726 		   DRV_VERSION " " DRV_RELDATE);
3727 MODULE_LICENSE("GPL");
3728