1 /*
2  * File Name:
3  *   defxx.c
4  *
5  * Copyright Information:
6  *   Copyright Digital Equipment Corporation 1996.
7  *
8  *   This software may be used and distributed according to the terms of
9  *   the GNU General Public License, incorporated herein by reference.
10  *
11  * Abstract:
12  *   A Linux device driver supporting the Digital Equipment Corporation
13  *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
14  *   adapters include:
15  *
16  *		DEC FDDIcontroller/TURBOchannel (DEFTA)
17  *		DEC FDDIcontroller/EISA         (DEFEA)
18  *		DEC FDDIcontroller/PCI          (DEFPA)
19  *
20  * The original author:
21  *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
22  *
23  * Maintainers:
24  *   macro	Maciej W. Rozycki <macro@linux-mips.org>
25  *
26  * Credits:
27  *   I'd like to thank Patricia Cross for helping me get started with
28  *   Linux, David Davies for a lot of help upgrading and configuring
29  *   my development system and for answering many OS and driver
30  *   development questions, and Alan Cox for recommendations and
31  *   integration help on getting FDDI support into Linux.  LVS
32  *
33  * Driver Architecture:
34  *   The driver architecture is largely based on previous driver work
35  *   for other operating systems.  The upper edge interface and
36  *   functions were largely taken from existing Linux device drivers
37  *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38  *   driver.
39  *
40  *   Adapter Probe -
41  *		The driver scans for supported EISA adapters by reading the
42  *		SLOT ID register for each EISA slot and making a match
43  *		against the expected value.
44  *
45  *   Bus-Specific Initialization -
46  *		This driver supports the EISA, PCI and TURBOchannel controller
47  *		families.  While the custom DMA chip and FDDI logic are similar
48  *		or identical, the bus logic is very different.  After
49  *		initialization, the only bus-specific difference is in how the
50  *		driver enables and disables interrupts.  Other than that, the
51  *		run-time critical code behaves the same on all families.
52  *		By default the EISA and PCI adapters are configured to I/O map,
53  *		rather than memory map, the adapter registers.
54  *
55  *   Driver Open/Close -
56  *		In the driver open routine, the driver ISR (interrupt service
57  *		routine) is registered and the adapter is brought to an
58  *		operational state.  In the driver close routine, the opposite
59  *		occurs; the driver ISR is deregistered and the adapter is
60  *		brought to a safe, but closed state.  Users may use consecutive
61  *		commands to bring the adapter up and down as in the following
62  *		example:
63  *					ifconfig fddi0 up
64  *					ifconfig fddi0 down
65  *					ifconfig fddi0 up
66  *
67  *   Driver Shutdown -
68  *		Apparently, there is no shutdown or halt routine support under
69  *		Linux.  This routine would be called during "reboot" or
70  *		"shutdown" to allow the driver to place the adapter in a safe
71  *		state before a warm reboot occurs.  To be really safe, the user
72  *		should close the adapter before shutdown (e.g. ifconfig fddi0 down)
73  *		to ensure that the adapter DMA engine is taken off-line.  However,
74  *		the current driver code anticipates this problem and always issues
75  *		a soft reset of the adapter at the beginning of driver initialization.
76  *		A future driver enhancement in this area may occur in 2.1.X where
77  *		Alan indicated that a shutdown handler may be implemented.
78  *
79  *   Interrupt Service Routine -
80  *		The driver supports shared interrupts, so the ISR is registered for
81  *		each board with the appropriate flag and the pointer to that board's
82  *		device structure.  This provides the context during interrupt
83  *		processing to support shared interrupts and multiple boards.
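 *
 *		For reference, this registration amounts to a request_irq()
 *		call of the form used by dfx_open() later in this file:
 *
 *			request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *				    dev->name, dev);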
84  *
85  *		Interrupt enabling/disabling can occur at many levels.  At the host
86  *		end, you can disable system interrupts, or disable interrupts at the
87  *		PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
88  *		have a bus-logic chip interrupt enable/disable as well as a DMA
89  *		controller interrupt enable/disable.
90  *
91  *		The driver currently enables and disables adapter interrupts at the
92  *		bus-logic chip and assumes that Linux will take care of clearing or
93  *		acknowledging any host-based interrupt chips.
94  *
95  *   Control Functions -
96  *		Control functions are those used to support functions such as adding
97  *		or deleting multicast addresses, enabling or disabling packet
98  *		reception filters, or other custom/proprietary commands.  Presently,
99  *		the driver supports the "get statistics", "set multicast list", and
100  *		"set mac address" functions defined by Linux.  Possible
101  *		enhancements include:
102  *
103  *				- Custom ioctl interface for executing port interface commands
104  *				- Custom ioctl interface for adding unicast addresses to
105  *				  adapter CAM (to support bridge functions).
106  *				- Custom ioctl interface for supporting firmware upgrades.
107  *
108  *   Hardware (port interface) Support Routines -
109  *		The driver function names that start with "dfx_hw_" represent
110  *		low-level port interface routines that are called frequently.  They
111  *		include issuing a DMA or port control command to the adapter,
112  *		resetting the adapter, or reading the adapter state.  Since the
113  *		driver initialization and run-time code must make calls into the
114  *		port interface, these routines were written to be as generic and
115  *		usable as possible.
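 *
 *		As an example of the calling convention, dfx_bus_config_check()
 *		below reads the PDQ revision with:
 *
 *			dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_SUB_CMD,
 *					     PI_SUB_CMD_K_PDQ_REV_GET, 0,
 *					     &host_data);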
116  *
117  *   Receive Path -
118  *		The adapter DMA engine supports a 256 entry receive descriptor block
119  *		of which up to 255 entries can be used at any given time.  The
120  *		architecture is a standard producer, consumer, completion model in
121  *		which the driver "produces" receive buffers to the adapter, the
122  *		adapter "consumes" the receive buffers by DMAing incoming packet data,
123  *		and the driver "completes" the receive buffers by servicing the
124  *		incoming packet, then "produces" a new buffer and starts the cycle
125  *		again.  Receive buffers can be fragmented into up to 16 fragments
126  *		(descriptor entries).  For simplicity, this driver posts
127  *		single-fragment receive buffers of 4608 bytes, then allocates an
128  *		sk_buff, copies the data, and reposts the buffer.  To reduce CPU
129  *		utilization, a better approach would be to pass up the receive
130  *		buffer (no extra copy) then allocate and post a replacement buffer.
131  *		This is a performance enhancement that should be looked into at
132  *		some point.
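 *
 *		A minimal sketch of this copy-and-repost scheme, in which a
 *		3-byte skb_reserve() keeps the data portion longword aligned
 *		(pkt_len and p_buff are illustrative names only):
 *
 *			skb = dev_alloc_skb(pkt_len + 3);
 *			skb_reserve(skb, 3);
 *			memcpy(skb_put(skb, pkt_len), p_buff, pkt_len);
 *			skb->protocol = fddi_type_trans(skb, bp->dev);
 *			netif_rx(skb);
 *
 *		after which the original 4608-byte buffer is reposted to the
 *		adapter.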
133  *
134  *   Transmit Path -
135  *		Like the receive path, the adapter DMA engine supports a 256 entry
136  *		transmit descriptor block of which up to 255 entries can be used at
137  *		any given time.  Transmit buffers can be fragmented into up to 255
138  *		fragments (descriptor entries).  This driver always posts one
139  *		fragment per transmit packet request.
140  *
141  *		The fragment contains the entire packet from FC to end of data.
142  *		Before posting the buffer to the adapter, the driver sets a three-byte
143  *		packet request header (PRH) which is required by the Motorola MAC chip
144  *		used on the adapters.  The PRH tells the MAC the type of token to
145  *		receive/send, whether or not to generate and append the CRC, whether
146  *		synchronous or asynchronous framing is used, etc.  Since the PRH
147  *		definition is not necessarily consistent across all FDDI chipsets,
148  *		the driver, rather than the common FDDI packet handler routines,
149  *		sets these bytes.
150  *
151  *		To reduce the number of descriptor fetches needed per transmit request,
152  *		the driver takes advantage of the fact that there are at least three
153  *		bytes available before the skb->data field on the outgoing transmit
154  *		request.  This is guaranteed by having fddi_setup() in net_init.c set
155  *		dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
156  *		header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
157  *		bytes which we'll use to store the PRH.
158  *
159  *		There's a subtle advantage to adding these pad bytes to the
160  *		hard_header_len: it ensures that the data portion of the packet for
161  *		an 802.2 SNAP frame is longword aligned.  Other FDDI driver
162  *		implementations may not need the extra padding and can start copying
163  *		or DMAing directly from the FC byte which starts at skb->data.  Should
164  *		another driver implementation need ADDITIONAL padding, the net_init.c
165  *		module should be updated and dev->hard_header_len should be increased.
166  *		NOTE: To maintain the alignment on the data portion of the packet,
167  *		dev->hard_header_len should always be evenly divisible by 4 and at
168  *		least 24 bytes in size.
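 *
 *		At transmit time the three pad bytes are reclaimed and filled
 *		with the PRH roughly as follows (the DFX_PRH*_BYTE values are
 *		defined in defxx.h):
 *
 *			skb_push(skb, 3);
 *			skb->data[0] = DFX_PRH0_BYTE;
 *			skb->data[1] = DFX_PRH1_BYTE;
 *			skb->data[2] = DFX_PRH2_BYTE;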
169  *
170  * Modification History:
171  *		Date		Name	Description
172  *		16-Aug-96	LVS		Created.
173  *		20-Aug-96	LVS		Updated dfx_probe so that version information
174  *							string is only displayed if 1 or more cards are
175  *							found.  Changed dfx_rcv_queue_process to copy
176  *							3 NULL bytes before FC to ensure that data is
177  *							longword aligned in receive buffer.
178  *		09-Sep-96	LVS		Updated dfx_ctl_set_multicast_list to enable
179  *							LLC group promiscuous mode if multicast list
180  *							is too large.  LLC individual/group promiscuous
181  *							mode is now disabled if IFF_PROMISC flag not set.
182  *							dfx_xmt_queue_pkt no longer checks for NULL skb
183  *							on Alan Cox recommendation.  Added node address
184  *							override support.
185  *		12-Sep-96	LVS		Reset current address to factory address during
186  *							device open.  Updated transmit path to post a
187  *							single fragment which includes PRH->end of data.
188  *		Mar 2000	AC		Did various cleanups for 2.3.x
189  *		Jun 2000	jgarzik		PCI and resource alloc cleanups
190  *		Jul 2000	tjeerd		Much cleanup and some bug fixes
191  *		Sep 2000	tjeerd		Fix leak on unload, cosmetic code cleanup
192  *		Feb 2001			Skb allocation fixes
193  *		Feb 2001	davej		PCI enable cleanups.
194  *		04 Aug 2003	macro		Converted to the DMA API.
195  *		14 Aug 2004	macro		Fix device names reported.
196  *		14 Jun 2005	macro		Use irqreturn_t.
197  *		23 Oct 2006	macro		Big-endian host support.
198  *		14 Dec 2006	macro		TURBOchannel support.
199  *		01 Jul 2014	macro		Fixes for DMA on 64-bit hosts.
200  */
201 
202 /* Include files */
203 #include <linux/bitops.h>
204 #include <linux/compiler.h>
205 #include <linux/delay.h>
206 #include <linux/dma-mapping.h>
207 #include <linux/eisa.h>
208 #include <linux/errno.h>
209 #include <linux/fddidevice.h>
210 #include <linux/interrupt.h>
211 #include <linux/ioport.h>
212 #include <linux/kernel.h>
213 #include <linux/module.h>
214 #include <linux/netdevice.h>
215 #include <linux/pci.h>
216 #include <linux/skbuff.h>
217 #include <linux/slab.h>
218 #include <linux/string.h>
219 #include <linux/tc.h>
220 
221 #include <asm/byteorder.h>
222 #include <asm/io.h>
223 
224 #include "defxx.h"
225 
226 /* Version information string should be updated prior to each new release!  */
227 #define DRV_NAME "defxx"
228 #define DRV_VERSION "v1.11"
229 #define DRV_RELDATE "2014/07/01"
230 
231 static char version[] =
232 	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 	"  Lawrence V. Stefani and others\n";
234 
235 #define DYNAMIC_BUFFERS 1
236 
237 #define SKBUFF_RX_COPYBREAK 200
238 /*
239  * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
240  * alignment for compatibility with old EISA boards.
241  */
242 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
243 
244 #ifdef CONFIG_EISA
245 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
246 #else
247 #define DFX_BUS_EISA(dev) 0
248 #endif
249 
250 #ifdef CONFIG_TC
251 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
252 #else
253 #define DFX_BUS_TC(dev) 0
254 #endif
255 
256 #ifdef CONFIG_DEFXX_MMIO
257 #define DFX_MMIO 1
258 #else
259 #define DFX_MMIO 0
260 #endif
261 
262 /* Define module-wide (static) routines */
263 
264 static void		dfx_bus_init(struct net_device *dev);
265 static void		dfx_bus_uninit(struct net_device *dev);
266 static void		dfx_bus_config_check(DFX_board_t *bp);
267 
268 static int		dfx_driver_init(struct net_device *dev,
269 					const char *print_name,
270 					resource_size_t bar_start);
271 static int		dfx_adap_init(DFX_board_t *bp, int get_buffers);
272 
273 static int		dfx_open(struct net_device *dev);
274 static int		dfx_close(struct net_device *dev);
275 
276 static void		dfx_int_pr_halt_id(DFX_board_t *bp);
277 static void		dfx_int_type_0_process(DFX_board_t *bp);
278 static void		dfx_int_common(struct net_device *dev);
279 static irqreturn_t	dfx_interrupt(int irq, void *dev_id);
280 
281 static struct		net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
282 static void		dfx_ctl_set_multicast_list(struct net_device *dev);
283 static int		dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
284 static int		dfx_ctl_update_cam(DFX_board_t *bp);
285 static int		dfx_ctl_update_filters(DFX_board_t *bp);
286 
287 static int		dfx_hw_dma_cmd_req(DFX_board_t *bp);
288 static int		dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32	command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
289 static void		dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
290 static int		dfx_hw_adap_state_rd(DFX_board_t *bp);
291 static int		dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
292 
293 static int		dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294 static void		dfx_rcv_queue_process(DFX_board_t *bp);
295 #ifdef DYNAMIC_BUFFERS
296 static void		dfx_rcv_flush(DFX_board_t *bp);
297 #else
298 static inline void	dfx_rcv_flush(DFX_board_t *bp) {}
299 #endif
300 
301 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
302 				     struct net_device *dev);
303 static int		dfx_xmt_done(DFX_board_t *bp);
304 static void		dfx_xmt_flush(DFX_board_t *bp);
305 
306 /* Define module-wide (static) variables */
307 
308 static struct pci_driver dfx_pci_driver;
309 static struct eisa_driver dfx_eisa_driver;
310 static struct tc_driver dfx_tc_driver;
311 
312 
313 /*
314  * =======================
315  * = dfx_port_write_long =
316  * = dfx_port_read_long  =
317  * =======================
318  *
319  * Overview:
320  *   Routines for reading and writing values from/to adapter
321  *
322  * Returns:
323  *   None
324  *
325  * Arguments:
326  *   bp		- pointer to board information
327  *   offset	- register offset from base I/O address
328  *   data	- for dfx_port_write_long, this is a value to write;
329  *		  for dfx_port_read_long, this is a pointer to store
330  *		  the read value
331  *
332  * Functional Description:
333  *   These routines perform the correct operation to read or write
334  *   the adapter register.
335  *
336  *   EISA port block base addresses are based on the slot number in which the
337  *   controller is installed.  For example, if the EISA controller is installed
338  *   in slot 4, the port block base address is 0x4000.  If the controller is
339  *   installed in slot 2, the port block base address is 0x2000, and so on.
340  *   This port block can be used to access PDQ, ESIC, and DEFEA on-board
341  *   registers using the register offsets defined in DEFXX.H.
342  *
343  *   PCI port block base addresses are assigned by the PCI BIOS or system
344  *   firmware.  There is one 128 byte port block which can be accessed.  It
345  *   allows for I/O mapping of both PDQ and PFI registers using the register
346  *   offsets defined in DEFXX.H.
347  *
348  * Return Codes:
349  *   None
350  *
351  * Assumptions:
352  *   bp->base is a valid base I/O address for this adapter.
353  *   offset is a valid register offset for this adapter.
354  *
355  * Side Effects:
356  *   Rather than produce macros for these functions, these routines
357  *   are defined using "inline" to ensure that the compiler will
358  *   generate inline code and not waste a procedure call and return.
359  *   This provides all the benefits of macros, but with the
360  *   advantage of strict data type checking.
361  */
362 
363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
364 {
365 	writel(data, bp->base.mem + offset);
366 	mb();
367 }
368 
369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
370 {
371 	outl(data, bp->base.port + offset);
372 }
373 
374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
375 {
376 	struct device __maybe_unused *bdev = bp->bus_dev;
377 	int dfx_bus_tc = DFX_BUS_TC(bdev);
378 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
379 
380 	if (dfx_use_mmio)
381 		dfx_writel(bp, offset, data);
382 	else
383 		dfx_outl(bp, offset, data);
384 }
385 
386 
387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
388 {
389 	mb();
390 	*data = readl(bp->base.mem + offset);
391 }
392 
393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
394 {
395 	*data = inl(bp->base.port + offset);
396 }
397 
398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
399 {
400 	struct device __maybe_unused *bdev = bp->bus_dev;
401 	int dfx_bus_tc = DFX_BUS_TC(bdev);
402 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
403 
404 	if (dfx_use_mmio)
405 		dfx_readl(bp, offset, data);
406 	else
407 		dfx_inl(bp, offset, data);
408 }
409 
410 
411 /*
412  * ================
413  * = dfx_get_bars =
414  * ================
415  *
416  * Overview:
417  *   Retrieves the address range used to access control and status
418  *   registers.
419  *
420  * Returns:
421  *   None
422  *
423  * Arguments:
424  *   bdev	- pointer to device information
425  *   bar_start	- pointer to store the start address
426  *   bar_len	- pointer to store the length of the area
427  *
428  * Assumptions:
429  *   I am sure there are some.
430  *
431  * Side Effects:
432  *   None
433  */
434 static void dfx_get_bars(struct device *bdev,
435 			 resource_size_t *bar_start, resource_size_t *bar_len)
436 {
437 	int dfx_bus_pci = dev_is_pci(bdev);
438 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
439 	int dfx_bus_tc = DFX_BUS_TC(bdev);
440 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
441 
442 	if (dfx_bus_pci) {
443 		int num = dfx_use_mmio ? 0 : 1;
444 
445 		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
446 		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
447 	}
448 	if (dfx_bus_eisa) {
449 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
450 		resource_size_t bar;
451 
452 		if (dfx_use_mmio) {
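			/* Assemble the MEMCS0 decode address and mask from the
			 * three 8-bit ESIC address compare/mask registers.  */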
453 			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
454 			bar <<= 8;
455 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
456 			bar <<= 8;
457 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
458 			bar <<= 16;
459 			*bar_start = bar;
460 			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
461 			bar <<= 8;
462 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
463 			bar <<= 8;
464 			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
465 			bar <<= 16;
466 			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
467 		} else {
468 			*bar_start = base_addr;
469 			*bar_len = PI_ESIC_K_CSR_IO_LEN +
470 				   PI_ESIC_K_BURST_HOLDOFF_LEN;
471 		}
472 	}
473 	if (dfx_bus_tc) {
474 		*bar_start = to_tc_dev(bdev)->resource.start +
475 			     PI_TC_K_CSR_OFFSET;
476 		*bar_len = PI_TC_K_CSR_LEN;
477 	}
478 }
479 
480 static const struct net_device_ops dfx_netdev_ops = {
481 	.ndo_open		= dfx_open,
482 	.ndo_stop		= dfx_close,
483 	.ndo_start_xmit		= dfx_xmt_queue_pkt,
484 	.ndo_get_stats		= dfx_ctl_get_stats,
485 	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
486 	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
487 };
488 
489 /*
490  * ================
491  * = dfx_register =
492  * ================
493  *
494  * Overview:
495  *   Initializes a supported FDDI controller
496  *
497  * Returns:
498  *   Condition code
499  *
500  * Arguments:
501  *   bdev - pointer to device information
502  *
503  * Functional Description:
504  *
505  * Return Codes:
506  *   0		 - This device (fddi0, fddi1, etc) configured successfully
507  *   <0          - Resource request, register mapping, initialization or
508  *                 registration failed.
508  *
509  * Assumptions:
510  *   It compiles so it should work :-( (PCI cards do :-)
511  *
512  * Side Effects:
513  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
514  *   initialized and the board resources are read and stored in
515  *   the device structure.
516  */
517 static int dfx_register(struct device *bdev)
518 {
519 	static int version_disp;
520 	int dfx_bus_pci = dev_is_pci(bdev);
521 	int dfx_bus_tc = DFX_BUS_TC(bdev);
522 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
523 	const char *print_name = dev_name(bdev);
524 	struct net_device *dev;
525 	DFX_board_t	  *bp;			/* board pointer */
526 	resource_size_t bar_start = 0;		/* pointer to port */
527 	resource_size_t bar_len = 0;		/* resource length */
528 	int alloc_size;				/* total buffer size used */
529 	struct resource *region;
530 	int err = 0;
531 
532 	if (!version_disp) {	/* display version info if adapter is found */
533 		version_disp = 1;	/* set display flag to TRUE so that */
534 		printk(version);	/* we only display this string ONCE */
535 	}
536 
537 	dev = alloc_fddidev(sizeof(*bp));
538 	if (!dev) {
539 		printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
540 		       print_name);
541 		return -ENOMEM;
542 	}
543 
544 	/* Enable PCI device. */
545 	if (dfx_bus_pci) {
546 		err = pci_enable_device(to_pci_dev(bdev));
547 		if (err) {
548 			printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
549 			       print_name);
			goto err_out;
		}
	}
550 
551 	SET_NETDEV_DEV(dev, bdev);
552 
553 	bp = netdev_priv(dev);
554 	bp->bus_dev = bdev;
555 	dev_set_drvdata(bdev, dev);
556 
557 	dfx_get_bars(bdev, &bar_start, &bar_len);
558 
559 	if (dfx_use_mmio)
560 		region = request_mem_region(bar_start, bar_len, print_name);
561 	else
562 		region = request_region(bar_start, bar_len, print_name);
563 	if (!region) {
564 		printk(KERN_ERR "%s: Cannot reserve I/O resource "
565 		       "0x%lx @ 0x%lx, aborting\n",
566 		       print_name, (long)bar_len, (long)bar_start);
567 		err = -EBUSY;
568 		goto err_out_disable;
569 	}
570 
571 	/* Set up I/O base address. */
572 	if (dfx_use_mmio) {
573 		bp->base.mem = ioremap_nocache(bar_start, bar_len);
574 		if (!bp->base.mem) {
575 			printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
576 			err = -ENOMEM;
577 			goto err_out_region;
578 		}
579 	} else {
580 		bp->base.port = bar_start;
581 		dev->base_addr = bar_start;
582 	}
583 
584 	/* Initialize new device structure */
585 	dev->netdev_ops			= &dfx_netdev_ops;
586 
587 	if (dfx_bus_pci)
588 		pci_set_master(to_pci_dev(bdev));
589 
590 	if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
591 		err = -ENODEV;
592 		goto err_out_unmap;
593 	}
594 
595 	err = register_netdev(dev);
596 	if (err)
597 		goto err_out_kfree;
598 
599 	printk("%s: registered as %s\n", print_name, dev->name);
600 	return 0;
601 
602 err_out_kfree:
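	/* Mirror the allocation size computed in dfx_driver_init() so the
	 * shared DMA block can be freed if a later step fails.  */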
603 	alloc_size = sizeof(PI_DESCR_BLOCK) +
604 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
605 #ifndef DYNAMIC_BUFFERS
606 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
607 #endif
608 		     sizeof(PI_CONSUMER_BLOCK) +
609 		     (PI_ALIGN_K_DESC_BLK - 1);
610 	if (bp->kmalloced)
611 		dma_free_coherent(bdev, alloc_size,
612 				  bp->kmalloced, bp->kmalloced_dma);
613 
614 err_out_unmap:
615 	if (dfx_use_mmio)
616 		iounmap(bp->base.mem);
617 
618 err_out_region:
619 	if (dfx_use_mmio)
620 		release_mem_region(bar_start, bar_len);
621 	else
622 		release_region(bar_start, bar_len);
623 
624 err_out_disable:
625 	if (dfx_bus_pci)
626 		pci_disable_device(to_pci_dev(bdev));
627 
628 err_out:
629 	free_netdev(dev);
630 	return err;
631 }
632 
633 
634 /*
635  * ================
636  * = dfx_bus_init =
637  * ================
638  *
639  * Overview:
640  *   Initializes the bus-specific controller logic.
641  *
642  * Returns:
643  *   None
644  *
645  * Arguments:
646  *   dev - pointer to device information
647  *
648  * Functional Description:
649  *   Determine and save adapter IRQ in device table,
650  *   then perform bus-specific logic initialization.
651  *
652  * Return Codes:
653  *   None
654  *
655  * Assumptions:
656  *   bp->base has already been set with the proper
657  *	 base I/O address for this device.
658  *
659  * Side Effects:
660  *   Interrupts are enabled at the adapter bus-specific logic.
661  *   Note:  Interrupts at the DMA engine (PDQ chip) are not
662  *   enabled yet.
663  */
664 
665 static void dfx_bus_init(struct net_device *dev)
666 {
667 	DFX_board_t *bp = netdev_priv(dev);
668 	struct device *bdev = bp->bus_dev;
669 	int dfx_bus_pci = dev_is_pci(bdev);
670 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
671 	int dfx_bus_tc = DFX_BUS_TC(bdev);
672 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
673 	u8 val;
674 
675 	DBG_printk("In dfx_bus_init...\n");
676 
677 	/* Initialize a pointer back to the net_device struct */
678 	bp->dev = dev;
679 
680 	/* Initialize adapter based on bus type */
681 
682 	if (dfx_bus_tc)
683 		dev->irq = to_tc_dev(bdev)->interrupt;
684 	if (dfx_bus_eisa) {
685 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
686 
687 		/* Disable the board before fiddling with the decoders.  */
688 		outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
689 
690 		/* Get the interrupt level from the ESIC chip.  */
691 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
692 		val &= PI_CONFIG_STAT_0_M_IRQ;
693 		val >>= PI_CONFIG_STAT_0_V_IRQ;
694 
695 		switch (val) {
696 		case PI_CONFIG_STAT_0_IRQ_K_9:
697 			dev->irq = 9;
698 			break;
699 
700 		case PI_CONFIG_STAT_0_IRQ_K_10:
701 			dev->irq = 10;
702 			break;
703 
704 		case PI_CONFIG_STAT_0_IRQ_K_11:
705 			dev->irq = 11;
706 			break;
707 
708 		case PI_CONFIG_STAT_0_IRQ_K_15:
709 			dev->irq = 15;
710 			break;
711 		}
712 
713 		/*
714 		 * Enable memory decoding (MEMCS0) and/or port decoding
715 		 * (IOCS1/IOCS0) as appropriate in Function Control
716 		 * Register.  IOCS0 is used for PDQ registers, taking 16
717 		 * 32-bit words, while IOCS1 is used for the Burst Holdoff
718 		 * register, taking a single 32-bit word only.  We use the
719 		 * slot-specific I/O range as per the ESIC spec, that is
720 		 * set bits 15:12 in the mask registers to mask them out.
721 		 */
722 
723 		/* Set the decode range of the board.  */
724 		val = 0;
725 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
726 		val = PI_DEFEA_K_CSR_IO;
727 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
728 
729 		val = PI_IO_CMP_M_SLOT;
730 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
731 		val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
732 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
733 
734 		val = 0;
735 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
736 		val = PI_DEFEA_K_BURST_HOLDOFF;
737 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
738 
739 		val = PI_IO_CMP_M_SLOT;
740 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
741 		val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
742 		outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
743 
744 		/* Enable the decoders.  */
745 		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
746 		if (dfx_use_mmio)
747 			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
748 		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
749 
750 		/*
751 		 * Enable access to the rest of the module
752 		 * (including PDQ and packet memory).
753 		 */
754 		val = PI_SLOT_CNTRL_M_ENB;
755 		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
756 
757 		/*
758 		 * Map PDQ registers into memory or port space.  This is
759 		 * done with a bit in the Burst Holdoff register.
760 		 */
761 		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
762 		if (dfx_use_mmio)
763 			val |= PI_BURST_HOLDOFF_M_MEM_MAP;
764 		else
765 			val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
766 		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
767 
768 		/* Enable interrupts at EISA bus interface chip (ESIC) */
769 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
770 		val |= PI_CONFIG_STAT_0_M_INT_ENB;
771 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
772 	}
773 	if (dfx_bus_pci) {
774 		struct pci_dev *pdev = to_pci_dev(bdev);
775 
776 		/* Get the interrupt level from the PCI Configuration Table */
777 
778 		dev->irq = pdev->irq;
779 
780 		/* Check Latency Timer and set if less than minimal */
781 
782 		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
783 		if (val < PFI_K_LAT_TIMER_MIN) {
784 			val = PFI_K_LAT_TIMER_DEF;
785 			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
786 		}
787 
788 		/* Enable interrupts at PCI bus interface chip (PFI) */
789 		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
790 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
791 	}
792 }
793 
794 /*
795  * ==================
796  * = dfx_bus_uninit =
797  * ==================
798  *
799  * Overview:
800  *   Uninitializes the bus-specific controller logic.
801  *
802  * Returns:
803  *   None
804  *
805  * Arguments:
806  *   dev - pointer to device information
807  *
808  * Functional Description:
809  *   Perform bus-specific logic uninitialization.
810  *
811  * Return Codes:
812  *   None
813  *
814  * Assumptions:
815  *   bp->base has already been set with the proper
816  *	 base I/O address for this device.
817  *
818  * Side Effects:
819  *   Interrupts are disabled at the adapter bus-specific logic.
820  */
821 
822 static void dfx_bus_uninit(struct net_device *dev)
823 {
824 	DFX_board_t *bp = netdev_priv(dev);
825 	struct device *bdev = bp->bus_dev;
826 	int dfx_bus_pci = dev_is_pci(bdev);
827 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
828 	u8 val;
829 
830 	DBG_printk("In dfx_bus_uninit...\n");
831 
832 	/* Uninitialize adapter based on bus type */
833 
834 	if (dfx_bus_eisa) {
835 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
836 
837 		/* Disable interrupts at EISA bus interface chip (ESIC) */
838 		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
839 		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
840 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
841 	}
842 	if (dfx_bus_pci) {
843 		/* Disable interrupts at PCI bus interface chip (PFI) */
844 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
845 	}
846 }
847 
848 
849 /*
850  * ========================
851  * = dfx_bus_config_check =
852  * ========================
853  *
854  * Overview:
855  *   Checks the configuration (burst size, full-duplex, etc.).  If any parameters
856  *   are illegal, then this routine will set new defaults.
857  *
858  * Returns:
859  *   None
860  *
861  * Arguments:
862  *   bp - pointer to board information
863  *
864  * Functional Description:
865  *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
866  *   PDQ, and all FDDI PCI controllers, all values are legal.
867  *
868  * Return Codes:
869  *   None
870  *
871  * Assumptions:
872  *   dfx_adap_init has NOT been called yet so burst size and other items have
873  *   not been set.
874  *
875  * Side Effects:
876  *   None
877  */
878 
879 static void dfx_bus_config_check(DFX_board_t *bp)
880 {
881 	struct device __maybe_unused *bdev = bp->bus_dev;
882 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
883 	int	status;				/* return code from adapter port control call */
884 	u32	host_data;			/* LW data returned from port control call */
885 
886 	DBG_printk("In dfx_bus_config_check...\n");
887 
888 	/* Configuration check only valid for EISA adapter */
889 
890 	if (dfx_bus_eisa) {
891 		/*
892 		 * First check if revision 2 EISA controller.  Rev. 1 cards used
893 		 * PDQ revision B, so no workaround needed in this case.  Rev. 3
894 		 * cards used PDQ revision E, so no workaround needed in this
895 		 * case, either.  Only Rev. 2 cards used either Rev. D or E
896 		 * chips, so we must verify the chip revision on Rev. 2 cards.
897 		 */
898 		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
899 			/*
900 			 * Revision 2 FDDI EISA controller found,
901 			 * so let's check PDQ revision of adapter.
902 			 */
903 			status = dfx_hw_port_ctrl_req(bp,
904 											PI_PCTRL_M_SUB_CMD,
905 											PI_SUB_CMD_K_PDQ_REV_GET,
906 											0,
907 											&host_data);
908 			if ((status != DFX_K_SUCCESS) || (host_data == 2))
909 				{
910 				/*
911 				 * Either we couldn't determine the PDQ revision, or
912 				 * we determined that it is at revision D.  In either case,
913 				 * we need to implement the workaround.
914 				 */
915 
916 				/* Ensure that the burst size is set to 8 longwords or less */
917 
918 				switch (bp->burst_size)
919 					{
920 					case PI_PDATA_B_DMA_BURST_SIZE_32:
921 					case PI_PDATA_B_DMA_BURST_SIZE_16:
922 						bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
923 						break;
924 
925 					default:
926 						break;
927 					}
928 
929 				/* Ensure that full-duplex mode is not enabled */
930 
931 				bp->full_duplex_enb = PI_SNMP_K_FALSE;
932 				}
933 			}
934 		}
935 	}
936 
937 
938 /*
939  * ===================
940  * = dfx_driver_init =
941  * ===================
942  *
943  * Overview:
944  *   Initializes remaining adapter board structure information
945  *   and makes sure adapter is in a safe state prior to dfx_open().
946  *
947  * Returns:
948  *   Condition code
949  *
950  * Arguments:
951  *   dev - pointer to device information
952  *   print_name - printable device name
953  *
954  * Functional Description:
955  *   This function allocates additional resources such as the host memory
956  *   blocks needed by the adapter (eg. descriptor and consumer blocks).
957  *	 Remaining bus initialization steps are also completed.  The adapter
958  *   is also reset so that it is in the DMA_UNAVAILABLE state.  The OS
959  *   must call dfx_open() to open the adapter and bring it on-line.
960  *
961  * Return Codes:
962  *   DFX_K_SUCCESS	- initialization succeeded
963  *   DFX_K_FAILURE	- initialization failed - could not allocate memory
964  *						or read adapter MAC address
965  *
966  * Assumptions:
967  *   Memory allocated from dma_zalloc_coherent() is physically
968  *   contiguous, locked memory.
969  *
970  * Side Effects:
971  *   Adapter is reset and should be in DMA_UNAVAILABLE state before
972  *   returning from this routine.
973  */
974 
975 static int dfx_driver_init(struct net_device *dev, const char *print_name,
976 			   resource_size_t bar_start)
977 {
978 	DFX_board_t *bp = netdev_priv(dev);
979 	struct device *bdev = bp->bus_dev;
980 	int dfx_bus_pci = dev_is_pci(bdev);
981 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
982 	int dfx_bus_tc = DFX_BUS_TC(bdev);
983 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
984 	int alloc_size;			/* total buffer size needed */
985 	char *top_v, *curr_v;		/* virtual addrs into memory block */
986 	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
987 	u32 data;			/* host data register value */
988 	__le32 le32;
989 	char *board_name = NULL;
990 
991 	DBG_printk("In dfx_driver_init...\n");
992 
993 	/* Initialize bus-specific hardware registers */
994 
995 	dfx_bus_init(dev);
996 
997 	/*
998 	 * Initialize default values for configurable parameters
999 	 *
1000 	 * Note: All of these parameters are ones that a user may
1001 	 *       want to customize.  It'd be nice to break these
1002 	 *		 out into Space.c or someplace else that's more
1003 	 *		 accessible/understandable than this file.
1004 	 */
1005 
1006 	bp->full_duplex_enb		= PI_SNMP_K_FALSE;
1007 	bp->req_ttrt			= 8 * 12500;		/* 8ms in 80 nanosec units */
1008 	bp->burst_size			= PI_PDATA_B_DMA_BURST_SIZE_DEF;
1009 	bp->rcv_bufs_to_post	= RCV_BUFS_DEF;
1010 
1011 	/*
1012 	 * Ensure that HW configuration is OK
1013 	 *
1014 	 * Note: Depending on the hardware revision, we may need to modify
1015 	 *       some of the configurable parameters to workaround hardware
1016 	 *       limitations.  We'll perform this configuration check AFTER
1017 	 *       setting the parameters to their default values.
1018 	 */
1019 
1020 	dfx_bus_config_check(bp);
1021 
1022 	/* Disable PDQ interrupts first */
1023 
1024 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1025 
1026 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1027 
1028 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1029 
1030 	/*  Read the factory MAC address from the adapter then save it */
1031 
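	/*
	 * The address is returned by two port control reads: the first
	 * four octets in the low longword and the remaining two in the
	 * high longword, least significant byte first, hence the
	 * little-endian conversion before each copy below.
	 */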
1032 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1033 				 &data) != DFX_K_SUCCESS) {
1034 		printk("%s: Could not read adapter factory MAC address!\n",
1035 		       print_name);
1036 		return DFX_K_FAILURE;
1037 	}
1038 	le32 = cpu_to_le32(data);
1039 	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1040 
1041 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1042 				 &data) != DFX_K_SUCCESS) {
1043 		printk("%s: Could not read adapter factory MAC address!\n",
1044 		       print_name);
1045 		return DFX_K_FAILURE;
1046 	}
1047 	le32 = cpu_to_le32(data);
1048 	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1049 
1050 	/*
1051 	 * Set current address to factory address
1052 	 *
1053 	 * Note: Node address override support is handled through
1054 	 *       dfx_ctl_set_mac_address.
1055 	 */
1056 
1057 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1058 	if (dfx_bus_tc)
1059 		board_name = "DEFTA";
1060 	if (dfx_bus_eisa)
1061 		board_name = "DEFEA";
1062 	if (dfx_bus_pci)
1063 		board_name = "DEFPA";
1064 	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1065 		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1066 		(long long)bar_start, dev->irq, dev->dev_addr);
1067 
1068 	/*
1069 	 * Get memory for descriptor block, consumer block, and other buffers
1070 	 * that need to be DMA read or written to by the adapter.
1071 	 */
1072 
1073 	alloc_size = sizeof(PI_DESCR_BLOCK) +
1074 					PI_CMD_REQ_K_SIZE_MAX +
1075 					PI_CMD_RSP_K_SIZE_MAX +
1076 #ifndef DYNAMIC_BUFFERS
1077 					(bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1078 #endif
1079 					sizeof(PI_CONSUMER_BLOCK) +
1080 					(PI_ALIGN_K_DESC_BLK - 1);
1081 	bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
1082 						    &bp->kmalloced_dma,
1083 						    GFP_ATOMIC);
1084 	if (top_v == NULL)
1085 		return DFX_K_FAILURE;
1086 
1087 	top_p = bp->kmalloced_dma;	/* get physical address of buffer */
1088 
1089 	/*
1090 	 *  To guarantee the 8K alignment required for the descriptor block, 8K - 1
1091 	 *  plus the amount of memory needed was allocated.  The physical address
1092 	 *	is now 8K aligned.  By carving up the memory in a specific order,
1093 	 *  we'll guarantee the alignment requirements for all other structures.
1094 	 *
1095 	 *  Note: If the assumptions change regarding the non-paged, non-cached,
1096 	 *		  physically contiguous nature of the memory block or the address
1097 	 *		  alignments, then we'll need to implement a different algorithm
1098 	 *		  for allocating the needed memory.
1099 	 */
1100 
1101 	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1102 	curr_v = top_v + (curr_p - top_p);
1103 
1104 	/* Reserve space for descriptor block */
1105 
1106 	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1107 	bp->descr_block_phys = curr_p;
1108 	curr_v += sizeof(PI_DESCR_BLOCK);
1109 	curr_p += sizeof(PI_DESCR_BLOCK);
1110 
1111 	/* Reserve space for command request buffer */
1112 
1113 	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1114 	bp->cmd_req_phys = curr_p;
1115 	curr_v += PI_CMD_REQ_K_SIZE_MAX;
1116 	curr_p += PI_CMD_REQ_K_SIZE_MAX;
1117 
1118 	/* Reserve space for command response buffer */
1119 
1120 	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1121 	bp->cmd_rsp_phys = curr_p;
1122 	curr_v += PI_CMD_RSP_K_SIZE_MAX;
1123 	curr_p += PI_CMD_RSP_K_SIZE_MAX;
1124 
1125 	/* Reserve space for the LLC host receive queue buffers */
1126 
1127 	bp->rcv_block_virt = curr_v;
1128 	bp->rcv_block_phys = curr_p;
1129 
1130 #ifndef DYNAMIC_BUFFERS
1131 	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1132 	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1133 #endif
1134 
1135 	/* Reserve space for the consumer block */
1136 
1137 	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1138 	bp->cons_block_phys = curr_p;
1139 
1140 	/* Display virtual and physical addresses if debug driver */
1141 
1142 	DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1143 		   print_name, bp->descr_block_virt, &bp->descr_block_phys);
1144 	DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1145 		   print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1146 	DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1147 		   print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1148 	DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1149 		   print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1150 	DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1151 		   print_name, bp->cons_block_virt, &bp->cons_block_phys);
1152 
1153 	return DFX_K_SUCCESS;
1154 }
1155 
1156 
1157 /*
1158  * =================
1159  * = dfx_adap_init =
1160  * =================
1161  *
1162  * Overview:
1163  *   Brings the adapter to the link avail/link unavailable state.
1164  *
1165  * Returns:
1166  *   Condition code
1167  *
1168  * Arguments:
1169  *   bp - pointer to board information
1170  *   get_buffers - non-zero if buffers to be allocated
1171  *
1172  * Functional Description:
1173  *   Issues the low-level firmware/hardware calls necessary to bring
1174  *   the adapter up, or to properly reset and restore adapter during
1175  *   run-time.
1176  *
1177  * Return Codes:
1178  *   DFX_K_SUCCESS - Adapter brought up successfully
1179  *   DFX_K_FAILURE - Adapter initialization failed
1180  *
1181  * Assumptions:
1182  *   bp->reset_type should be set to a valid reset type value before
1183  *   calling this routine.
1184  *
1185  * Side Effects:
1186  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1187  *   upon a successful return of this routine.
1188  */
1189 
1190 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1191 	{
1192 	DBG_printk("In dfx_adap_init...\n");
1193 
1194 	/* Disable PDQ interrupts first */
1195 
1196 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1197 
1198 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1199 
1200 	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1201 		{
1202 		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1203 		return DFX_K_FAILURE;
1204 		}
1205 
1206 	/*
1207 	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1208 	 * so we'll acknowledge all Type 0 interrupts now before continuing.
1209 	 */
1210 
1211 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1212 
1213 	/*
1214 	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1215 	 *
1216 	 * Note: We only need to clear host copies of these registers.  The PDQ reset
1217 	 *       takes care of the on-board register values.
1218 	 */
1219 
1220 	bp->cmd_req_reg.lword	= 0;
1221 	bp->cmd_rsp_reg.lword	= 0;
1222 	bp->rcv_xmt_reg.lword	= 0;
1223 
1224 	/* Clear consumer block before going to DMA_AVAILABLE state */
1225 
1226 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1227 
1228 	/* Initialize the DMA Burst Size */
1229 
1230 	if (dfx_hw_port_ctrl_req(bp,
1231 							PI_PCTRL_M_SUB_CMD,
1232 							PI_SUB_CMD_K_BURST_SIZE_SET,
1233 							bp->burst_size,
1234 							NULL) != DFX_K_SUCCESS)
1235 		{
1236 		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1237 		return DFX_K_FAILURE;
1238 		}
1239 
1240 	/*
1241 	 * Set base address of Consumer Block
1242 	 *
1243 	 * Assumption: 32-bit physical address of consumer block is 64 byte
1244 	 *			   aligned.  That is, bits 0-5 of the address must be zero.
1245 	 */
1246 
1247 	if (dfx_hw_port_ctrl_req(bp,
1248 							PI_PCTRL_M_CONS_BLOCK,
1249 							bp->cons_block_phys,
1250 							0,
1251 							NULL) != DFX_K_SUCCESS)
1252 		{
1253 		printk("%s: Could not set consumer block address!\n", bp->dev->name);
1254 		return DFX_K_FAILURE;
1255 		}
1256 
1257 	/*
1258 	 * Set the base address of Descriptor Block and bring adapter
1259 	 * to DMA_AVAILABLE state.
1260 	 *
1261 	 * Note: We also set the literal and data swapping requirements
1262 	 *       in this command.
1263 	 *
1264 	 * Assumption: 32-bit physical address of descriptor block
1265 	 *       is 8Kbyte aligned.
1266 	 */
1267 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1268 				 (u32)(bp->descr_block_phys |
1269 				       PI_PDATA_A_INIT_M_BSWAP_INIT),
1270 				 0, NULL) != DFX_K_SUCCESS) {
1271 		printk("%s: Could not set descriptor block address!\n",
1272 		       bp->dev->name);
1273 		return DFX_K_FAILURE;
1274 	}
1275 
1276 	/* Set transmit flush timeout value */
1277 
1278 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1279 	bp->cmd_req_virt->char_set.item[0].item_code	= PI_ITEM_K_FLUSH_TIME;
1280 	bp->cmd_req_virt->char_set.item[0].value		= 3;	/* 3 seconds */
1281 	bp->cmd_req_virt->char_set.item[0].item_index	= 0;
1282 	bp->cmd_req_virt->char_set.item[1].item_code	= PI_ITEM_K_EOL;
1283 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1284 		{
1285 		printk("%s: DMA command request failed!\n", bp->dev->name);
1286 		return DFX_K_FAILURE;
1287 		}
1288 
1289 	/* Set the initial values for eFDXEnable and MACTReq MIB objects */
1290 
1291 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1292 	bp->cmd_req_virt->snmp_set.item[0].item_code	= PI_ITEM_K_FDX_ENB_DIS;
1293 	bp->cmd_req_virt->snmp_set.item[0].value		= bp->full_duplex_enb;
1294 	bp->cmd_req_virt->snmp_set.item[0].item_index	= 0;
1295 	bp->cmd_req_virt->snmp_set.item[1].item_code	= PI_ITEM_K_MAC_T_REQ;
1296 	bp->cmd_req_virt->snmp_set.item[1].value		= bp->req_ttrt;
1297 	bp->cmd_req_virt->snmp_set.item[1].item_index	= 0;
1298 	bp->cmd_req_virt->snmp_set.item[2].item_code	= PI_ITEM_K_EOL;
1299 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1300 		{
1301 		printk("%s: DMA command request failed!\n", bp->dev->name);
1302 		return DFX_K_FAILURE;
1303 		}
1304 
1305 	/* Initialize adapter CAM */
1306 
1307 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1308 		{
1309 		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1310 		return DFX_K_FAILURE;
1311 		}
1312 
1313 	/* Initialize adapter filters */
1314 
1315 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1316 		{
1317 		printk("%s: Adapter filters update failed!\n", bp->dev->name);
1318 		return DFX_K_FAILURE;
1319 		}
1320 
1321 	/*
1322 	 * Remove any existing dynamic buffers (i.e. if the adapter is being
1323 	 * reinitialized)
1324 	 */
1325 
1326 	if (get_buffers)
1327 		dfx_rcv_flush(bp);
1328 
1329 	/* Initialize receive descriptor block and produce buffers */
1330 
1331 	if (dfx_rcv_init(bp, get_buffers))
1332 	        {
1333 		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1334 		if (get_buffers)
1335 			dfx_rcv_flush(bp);
1336 		return DFX_K_FAILURE;
1337 		}
1338 
1339 	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1340 
1341 	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1342 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1343 		{
1344 		printk("%s: Start command failed\n", bp->dev->name);
1345 		if (get_buffers)
1346 			dfx_rcv_flush(bp);
1347 		return DFX_K_FAILURE;
1348 		}
1349 
1350 	/* Initialization succeeded, reenable PDQ interrupts */
1351 
1352 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1353 	return DFX_K_SUCCESS;
1354 	}
1355 
1356 
1357 /*
1358  * ============
1359  * = dfx_open =
1360  * ============
1361  *
1362  * Overview:
1363  *   Opens the adapter
1364  *
1365  * Returns:
1366  *   Condition code
1367  *
1368  * Arguments:
1369  *   dev - pointer to device information
1370  *
1371  * Functional Description:
1372  *   This function brings the adapter to an operational state.
1373  *
1374  * Return Codes:
1375  *   0		 - Adapter was successfully opened
1376  *   -EAGAIN - Could not register IRQ or adapter initialization failed
1377  *
1378  * Assumptions:
1379  *   This routine should only be called for a device that was
1380  *   initialized successfully.
1381  *
1382  * Side Effects:
1383  *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1384  *   if the open is successful.
1385  */
1386 
1387 static int dfx_open(struct net_device *dev)
1388 {
1389 	DFX_board_t *bp = netdev_priv(dev);
1390 	int ret;
1391 
1392 	DBG_printk("In dfx_open...\n");
1393 
1394 	/* Register IRQ - support shared interrupts by passing device ptr */
1395 
1396 	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1397 			  dev);
1398 	if (ret) {
1399 		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1400 		return ret;
1401 	}
1402 
1403 	/*
1404 	 * Set current address to factory MAC address
1405 	 *
1406 	 * Note: We've already done this step in dfx_driver_init.
1407 	 *       However, it's possible that a user has set a node
1408 	 *		 address override, then closed and reopened the
1409 	 *		 adapter.  Unless we reset the device address field
1410 	 *		 now, we'll continue to use the existing modified
1411 	 *		 address.
1412 	 */
1413 
1414 	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1415 
1416 	/* Clear local unicast/multicast address tables and counts */
1417 
1418 	memset(bp->uc_table, 0, sizeof(bp->uc_table));
1419 	memset(bp->mc_table, 0, sizeof(bp->mc_table));
1420 	bp->uc_count = 0;
1421 	bp->mc_count = 0;
1422 
1423 	/* Disable promiscuous filter settings */
1424 
1425 	bp->ind_group_prom	= PI_FSTATE_K_BLOCK;
1426 	bp->group_prom		= PI_FSTATE_K_BLOCK;
1427 
1428 	spin_lock_init(&bp->lock);
1429 
1430 	/* Reset and initialize adapter */
1431 
1432 	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
1433 	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1434 	{
1435 		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1436 		free_irq(dev->irq, dev);
1437 		return -EAGAIN;
1438 	}
1439 
1440 	/* Set device structure info */
1441 	netif_start_queue(dev);
1442 	return 0;
1443 }
1444 
1445 
1446 /*
1447  * =============
1448  * = dfx_close =
1449  * =============
1450  *
1451  * Overview:
1452  *   Closes the device/module.
1453  *
1454  * Returns:
1455  *   Condition code
1456  *
1457  * Arguments:
1458  *   dev - pointer to device information
1459  *
1460  * Functional Description:
1461  *   This routine closes the adapter and brings it to a safe state.
1462  *   The interrupt service routine is deregistered with the OS.
1463  *   The adapter can be opened again with another call to dfx_open().
1464  *
1465  * Return Codes:
1466  *   Always return 0.
1467  *
1468  * Assumptions:
1469  *   No further requests for this adapter are made after this routine is
1470  *   called.  dfx_open() can be called to reset and reinitialize the
1471  *   adapter.
1472  *
1473  * Side Effects:
1474  *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
1475  *   routine.
1476  */
1477 
1478 static int dfx_close(struct net_device *dev)
1479 {
1480 	DFX_board_t *bp = netdev_priv(dev);
1481 
1482 	DBG_printk("In dfx_close...\n");
1483 
1484 	/* Disable PDQ interrupts first */
1485 
1486 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1487 
1488 	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1489 
1490 	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1491 
1492 	/*
1493 	 * Flush any pending transmit buffers
1494 	 *
1495 	 * Note: It's important that we flush the transmit buffers
1496 	 *		 BEFORE we clear our copy of the Type 2 register.
1497 	 *		 Otherwise, we'll have no idea how many buffers
1498 	 *		 we need to free.
1499 	 */
1500 
1501 	dfx_xmt_flush(bp);
1502 
1503 	/*
1504 	 * Clear Type 1 and Type 2 registers after adapter reset
1505 	 *
1506 	 * Note: Even though we're closing the adapter, it's
1507 	 *       possible that an interrupt will occur after
1508 	 *		 dfx_close is called.  Without some assurance to
1509 	 *		 the contrary we want to make sure that we don't
1510 	 *		 process receive and transmit LLC frames and update
1511 	 *		 the Type 2 register with bad information.
1512 	 */
1513 
1514 	bp->cmd_req_reg.lword	= 0;
1515 	bp->cmd_rsp_reg.lword	= 0;
1516 	bp->rcv_xmt_reg.lword	= 0;
1517 
1518 	/* Clear consumer block for the same reason given above */
1519 
1520 	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1521 
1522 	/* Release all dynamically allocated skbs in the receive ring. */
1523 
1524 	dfx_rcv_flush(bp);
1525 
1526 	/* Clear device structure flags */
1527 
1528 	netif_stop_queue(dev);
1529 
1530 	/* Deregister (free) IRQ */
1531 
1532 	free_irq(dev->irq, dev);
1533 
1534 	return 0;
1535 }
1536 
1537 
1538 /*
1539  * ======================
1540  * = dfx_int_pr_halt_id =
1541  * ======================
1542  *
1543  * Overview:
1544  *   Displays halt IDs in string form.
1545  *
1546  * Returns:
1547  *   None
1548  *
1549  * Arguments:
1550  *   bp - pointer to board information
1551  *
1552  * Functional Description:
1553  *   Determine current halt id and display appropriate string.
1554  *
1555  * Return Codes:
1556  *   None
1557  *
1558  * Assumptions:
1559  *   None
1560  *
1561  * Side Effects:
1562  *   None
1563  */
1564 
1565 static void dfx_int_pr_halt_id(DFX_board_t	*bp)
1566 	{
1567 	PI_UINT32	port_status;			/* PDQ port status register value */
1568 	PI_UINT32	halt_id;				/* PDQ port status halt ID */
1569 
1570 	/* Read the latest port status */
1571 
1572 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1573 
1574 	/* Display halt state transition information */
1575 
1576 	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1577 	switch (halt_id)
1578 		{
1579 		case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1580 			printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1581 			break;
1582 
1583 		case PI_HALT_ID_K_PARITY_ERROR:
1584 			printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1585 			break;
1586 
1587 		case PI_HALT_ID_K_HOST_DIR_HALT:
1588 			printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1589 			break;
1590 
1591 		case PI_HALT_ID_K_SW_FAULT:
1592 			printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1593 			break;
1594 
1595 		case PI_HALT_ID_K_HW_FAULT:
1596 			printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1597 			break;
1598 
1599 		case PI_HALT_ID_K_PC_TRACE:
1600 			printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1601 			break;
1602 
1603 		case PI_HALT_ID_K_DMA_ERROR:
1604 			printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1605 			break;
1606 
1607 		case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1608 			printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1609 			break;
1610 
1611 		case PI_HALT_ID_K_BUS_EXCEPTION:
1612 			printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1613 			break;
1614 
1615 		default:
1616 			printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1617 			break;
1618 		}
1619 	}
1620 
1621 
1622 /*
1623  * ==========================
1624  * = dfx_int_type_0_process =
1625  * ==========================
1626  *
1627  * Overview:
1628  *   Processes Type 0 interrupts.
1629  *
1630  * Returns:
1631  *   None
1632  *
1633  * Arguments:
1634  *   bp - pointer to board information
1635  *
1636  * Functional Description:
1637  *   Processes all enabled Type 0 interrupts.  If the reason for the interrupt
1638  *   is a serious fault on the adapter, then an error message is displayed
1639  *   and the adapter is reset.
1640  *
1641  *   One tricky potential timing window is the rapid succession of "link avail"
1642  *   "link unavail" state change interrupts.  The acknowledgement of the Type 0
1643  *   interrupt must be done before reading the state from the Port Status
1644  *   register.  This is true because a state change could occur after reading
1645  *   the data, but before acknowledging the interrupt.  If this state change
1646  *   does happen, it would be lost because the driver is using the old state,
1647  *   and it will never know about the new state because it subsequently
1648  *   acknowledges the state change interrupt.
1649  *
1650  *          INCORRECT                                      CORRECT
1651  *      read type 0 int reasons                   read type 0 int reasons
1652  *      read adapter state                        ack type 0 interrupts
1653  *      ack type 0 interrupts                     read adapter state
1654  *      ... process interrupt ...                 ... process interrupt ...
1655  *
1656  * Return Codes:
1657  *   None
1658  *
1659  * Assumptions:
1660  *   None
1661  *
1662  * Side Effects:
1663  *   An adapter reset may occur if the adapter has any Type 0 error interrupts
1664  *   or if the port status indicates that the adapter is halted.  The driver
1665  *   is responsible for reinitializing the adapter with the current CAM
1666  *   contents and adapter filter settings.
1667  */
1668 
1669 static void dfx_int_type_0_process(DFX_board_t	*bp)
1670 
1671 	{
1672 	PI_UINT32	type_0_status;		/* Host Interrupt Type 0 register */
1673 	PI_UINT32	state;				/* current adap state (from port status) */
1674 
1675 	/*
1676 	 * Read host interrupt Type 0 register to determine which Type 0
1677 	 * interrupts are pending.  Immediately write it back out to clear
1678 	 * those interrupts.
1679 	 */
1680 
1681 	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1682 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1683 
1684 	/* Check for Type 0 error interrupts */
1685 
1686 	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1687 							PI_TYPE_0_STAT_M_PM_PAR_ERR |
1688 							PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1689 		{
1690 		/* Check for Non-Existent Memory error */
1691 
1692 		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1693 			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1694 
1695 		/* Check for Packet Memory Parity error */
1696 
1697 		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1698 			printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1699 
1700 		/* Check for Host Bus Parity error */
1701 
1702 		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1703 			printk("%s: Host Bus Parity Error\n", bp->dev->name);
1704 
1705 		/* Reset adapter and bring it back on-line */
1706 
1707 		bp->link_available = PI_K_FALSE;	/* link is no longer available */
1708 		bp->reset_type = 0;					/* rerun on-board diagnostics */
1709 		printk("%s: Resetting adapter...\n", bp->dev->name);
1710 		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1711 			{
1712 			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1713 			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1714 			return;
1715 			}
1716 		printk("%s: Adapter reset successful!\n", bp->dev->name);
1717 		return;
1718 		}
1719 
1720 	/* Check for transmit flush interrupt */
1721 
1722 	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1723 		{
1724 		/* Flush any pending xmt's and acknowledge the flush interrupt */
1725 
1726 		bp->link_available = PI_K_FALSE;		/* link is no longer available */
1727 		dfx_xmt_flush(bp);						/* flush any outstanding packets */
1728 		(void) dfx_hw_port_ctrl_req(bp,
1729 									PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1730 									0,
1731 									0,
1732 									NULL);
1733 		}
1734 
1735 	/* Check for adapter state change */
1736 
1737 	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1738 		{
1739 		/* Get latest adapter state */
1740 
1741 		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
1742 		if (state == PI_STATE_K_HALTED)
1743 			{
1744 			/*
1745 			 * Adapter has transitioned to HALTED state, try to reset
1746 			 * adapter to bring it back on-line.  If reset fails,
1747 			 * leave the adapter in the broken state.
1748 			 */
1749 
1750 			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1751 			dfx_int_pr_halt_id(bp);			/* display halt id as string */
1752 
1753 			/* Reset adapter and bring it back on-line */
1754 
1755 			bp->link_available = PI_K_FALSE;	/* link is no longer available */
1756 			bp->reset_type = 0;					/* rerun on-board diagnostics */
1757 			printk("%s: Resetting adapter...\n", bp->dev->name);
1758 			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1759 				{
1760 				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
1761 				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1762 				return;
1763 				}
1764 			printk("%s: Adapter reset successful!\n", bp->dev->name);
1765 			}
1766 		else if (state == PI_STATE_K_LINK_AVAIL)
1767 			{
1768 			bp->link_available = PI_K_TRUE;		/* set link available flag */
1769 			}
1770 		}
1771 	}
1772 
1773 
1774 /*
1775  * ==================
1776  * = dfx_int_common =
1777  * ==================
1778  *
1779  * Overview:
1780  *   Interrupt service routine (ISR)
1781  *
1782  * Returns:
1783  *   None
1784  *
1785  * Arguments:
1786  *   bp - pointer to board information
1787  *
1788  * Functional Description:
1789  *   This is the ISR which processes incoming adapter interrupts.
1790  *
1791  * Return Codes:
1792  *   None
1793  *
1794  * Assumptions:
1795  *   This routine assumes PDQ interrupts have not been disabled.
1796  *   When interrupts are disabled at the PDQ, the Port Status register
1797  *   is automatically cleared.  This routine uses the Port Status
1798  *   register value to determine whether a Type 0 interrupt occurred,
1799  *   so it's important that adapter interrupts are not normally
1800  *   enabled/disabled at the PDQ.
1801  *
1802  *   It's vital that this routine is NOT reentered for the
1803  *   same board and that the OS is not in another section of
1804  *   code (eg. dfx_xmt_queue_pkt) for the same board on a
1805  *   different thread.
1806  *
1807  * Side Effects:
1808  *   Pending interrupts are serviced.  Depending on the type of
1809  *   interrupt, acknowledging and clearing the interrupt at the
1810  *   PDQ involves writing a register to clear the interrupt bit
1811  *   or updating completion indices.
1812  */
1813 
1814 static void dfx_int_common(struct net_device *dev)
1815 {
1816 	DFX_board_t *bp = netdev_priv(dev);
1817 	PI_UINT32	port_status;		/* Port Status register */
1818 
1819 	/* Process xmt interrupts - frequent case, so always call this routine */
1820 
1821 	if(dfx_xmt_done(bp))				/* free consumed xmt packets */
1822 		netif_wake_queue(dev);
1823 
1824 	/* Process rcv interrupts - frequent case, so always call this routine */
1825 
1826 	dfx_rcv_queue_process(bp);		/* service received LLC frames */
1827 
1828 	/*
1829 	 * Transmit and receive producer and completion indices are updated on the
1830 	 * adapter by writing to the Type 2 Producer register.  Since the frequent
1831 	 * case is that we'll be processing either LLC transmit or receive buffers,
1832 	 * we'll optimize I/O writes by doing a single register write here.
1833 	 */
1834 
1835 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1836 
1837 	/* Read PDQ Port Status register to find out which interrupts need processing */
1838 
1839 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1840 
1841 	/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1842 
1843 	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1844 		dfx_int_type_0_process(bp);	/* process Type 0 interrupts */
1845 	}
1846 
1847 
1848 /*
1849  * =================
1850  * = dfx_interrupt =
1851  * =================
1852  *
1853  * Overview:
1854  *   Interrupt processing routine
1855  *
1856  * Returns:
1857  *   Whether a valid interrupt was seen.
1858  *
1859  * Arguments:
1860  *   irq	- interrupt vector
1861  *   dev_id	- pointer to device information
1862  *
1863  * Functional Description:
1864  *   This routine calls the interrupt processing routine for this adapter.  It
1865  *   disables and reenables adapter interrupts, as appropriate.  We can support
1866  *   shared interrupts since the incoming dev_id pointer provides our device
1867  *   structure context.
1868  *
1869  * Return Codes:
1870  *   IRQ_HANDLED - an IRQ was handled.
1871  *   IRQ_NONE    - no IRQ was handled.
1872  *
1873  * Assumptions:
1874  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1875  *   on Intel-based systems) is done by the operating system outside this
1876  *   routine.
1877  *
1878  *	 System interrupts are enabled through this call.
1879  *
1880  * Side Effects:
1881  *   Interrupts are disabled, then reenabled at the adapter.
1882  */
1883 
1884 static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1885 {
1886 	struct net_device *dev = dev_id;
1887 	DFX_board_t *bp = netdev_priv(dev);
1888 	struct device *bdev = bp->bus_dev;
1889 	int dfx_bus_pci = dev_is_pci(bdev);
1890 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1891 	int dfx_bus_tc = DFX_BUS_TC(bdev);
1892 
1893 	/* Service adapter interrupts */
1894 
1895 	if (dfx_bus_pci) {
1896 		u32 status;
1897 
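		/*
		 * If the PFI does not report a PDQ interrupt pending, this
		 * interrupt came from another device sharing the IRQ line,
		 * so leave it for that device's handler.
		 */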
1898 		dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1899 		if (!(status & PFI_STATUS_M_PDQ_INT))
1900 			return IRQ_NONE;
1901 
1902 		spin_lock(&bp->lock);
1903 
1904 		/* Disable PDQ-PFI interrupts at PFI */
1905 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1906 				    PFI_MODE_M_DMA_ENB);
1907 
1908 		/* Call interrupt service routine for this adapter */
1909 		dfx_int_common(dev);
1910 
1911 		/* Clear PDQ interrupt status bit and reenable interrupts */
1912 		dfx_port_write_long(bp, PFI_K_REG_STATUS,
1913 				    PFI_STATUS_M_PDQ_INT);
1914 		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1915 				    (PFI_MODE_M_PDQ_INT_ENB |
1916 				     PFI_MODE_M_DMA_ENB));
1917 
1918 		spin_unlock(&bp->lock);
1919 	}
1920 	if (dfx_bus_eisa) {
1921 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1922 		u8 status;
1923 
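		/*
		 * If the ESIC does not report an interrupt pending, the
		 * interrupt belongs to another device sharing the IRQ line.
		 */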
1924 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1925 		if (!(status & PI_CONFIG_STAT_0_M_PEND))
1926 			return IRQ_NONE;
1927 
1928 		spin_lock(&bp->lock);
1929 
1930 		/* Disable interrupts at the ESIC */
1931 		status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1932 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1933 
1934 		/* Call interrupt service routine for this adapter */
1935 		dfx_int_common(dev);
1936 
1937 		/* Reenable interrupts at the ESIC */
1938 		status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1939 		status |= PI_CONFIG_STAT_0_M_INT_ENB;
1940 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1941 
1942 		spin_unlock(&bp->lock);
1943 	}
1944 	if (dfx_bus_tc) {
1945 		u32 status;
1946 
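		/*
		 * TURBOchannel boards are checked through the PDQ Port Status
		 * register directly; if none of the pending bits are set, the
		 * interrupt was raised by another device sharing the IRQ line.
		 */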
1947 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1948 		if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1949 				PI_PSTATUS_M_XMT_DATA_PENDING |
1950 				PI_PSTATUS_M_SMT_HOST_PENDING |
1951 				PI_PSTATUS_M_UNSOL_PENDING |
1952 				PI_PSTATUS_M_CMD_RSP_PENDING |
1953 				PI_PSTATUS_M_CMD_REQ_PENDING |
1954 				PI_PSTATUS_M_TYPE_0_PENDING)))
1955 			return IRQ_NONE;
1956 
1957 		spin_lock(&bp->lock);
1958 
1959 		/* Call interrupt service routine for this adapter */
1960 		dfx_int_common(dev);
1961 
1962 		spin_unlock(&bp->lock);
1963 	}
1964 
1965 	return IRQ_HANDLED;
1966 }
1967 
1968 
1969 /*
1970  * =====================
1971  * = dfx_ctl_get_stats =
1972  * =====================
1973  *
1974  * Overview:
1975  *   Get statistics for FDDI adapter
1976  *
1977  * Returns:
1978  *   Pointer to FDDI statistics structure
1979  *
1980  * Arguments:
1981  *   dev - pointer to device information
1982  *
1983  * Functional Description:
1984  *   Gets current MIB objects from adapter, then
1985  *   returns FDDI statistics structure as defined
1986  *   in if_fddi.h.
1987  *
1988  *   Note: Since the FDDI statistics structure is
1989  *   still new and the device structure doesn't
1990  *   have an FDDI-specific get statistics handler,
1991  *   we'll return the FDDI statistics structure as
1992  *   a pointer to an Ethernet statistics structure.
1993  *   That way, at least the first part of the statistics
1994  *   structure can be decoded properly, and it allows
1995  *   "smart" applications to perform a second cast to
1996  *   decode the FDDI-specific statistics.
1997  *
1998  *   We'll have to pay attention to this routine as the
1999  *   device structure becomes more mature and LAN media
2000  *   independent.
2001  *
2002  * Return Codes:
2003  *   None
2004  *
2005  * Assumptions:
2006  *   None
2007  *
2008  * Side Effects:
2009  *   None
2010  */
2011 
2012 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2013 	{
2014 	DFX_board_t *bp = netdev_priv(dev);
2015 
2016 	/* Fill the bp->stats structure with driver-maintained counters */
2017 
2018 	bp->stats.gen.rx_packets = bp->rcv_total_frames;
2019 	bp->stats.gen.tx_packets = bp->xmt_total_frames;
2020 	bp->stats.gen.rx_bytes   = bp->rcv_total_bytes;
2021 	bp->stats.gen.tx_bytes   = bp->xmt_total_bytes;
2022 	bp->stats.gen.rx_errors  = bp->rcv_crc_errors +
2023 				   bp->rcv_frame_status_errors +
2024 				   bp->rcv_length_errors;
2025 	bp->stats.gen.tx_errors  = bp->xmt_length_errors;
2026 	bp->stats.gen.rx_dropped = bp->rcv_discards;
2027 	bp->stats.gen.tx_dropped = bp->xmt_discards;
2028 	bp->stats.gen.multicast  = bp->rcv_multicast_frames;
2029 	bp->stats.gen.collisions = 0;		/* always zero (0) for FDDI */
2030 
2031 	/* Get FDDI SMT MIB objects */
2032 
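	/*
	 * If the MIB get command fails, return the statistics structure
	 * with only the driver-maintained counters filled in above.
	 */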
2033 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2034 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2035 		return (struct net_device_stats *)&bp->stats;
2036 
2037 	/* Fill the bp->stats structure with the SMT MIB object values */
2038 
2039 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2040 	bp->stats.smt_op_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2041 	bp->stats.smt_hi_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2042 	bp->stats.smt_lo_version_id					= bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2043 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2044 	bp->stats.smt_mib_version_id				= bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2045 	bp->stats.smt_mac_cts						= bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2046 	bp->stats.smt_non_master_cts				= bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2047 	bp->stats.smt_master_cts					= bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2048 	bp->stats.smt_available_paths				= bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2049 	bp->stats.smt_config_capabilities			= bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2050 	bp->stats.smt_config_policy					= bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2051 	bp->stats.smt_connection_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2052 	bp->stats.smt_t_notify						= bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2053 	bp->stats.smt_stat_rpt_policy				= bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2054 	bp->stats.smt_trace_max_expiration			= bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2055 	bp->stats.smt_bypass_present				= bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2056 	bp->stats.smt_ecm_state						= bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2057 	bp->stats.smt_cf_state						= bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2058 	bp->stats.smt_remote_disconnect_flag		= bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2059 	bp->stats.smt_station_status				= bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2060 	bp->stats.smt_peer_wrap_flag				= bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2061 	bp->stats.smt_time_stamp					= bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2062 	bp->stats.smt_transition_time_stamp			= bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2063 	bp->stats.mac_frame_status_functions		= bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2064 	bp->stats.mac_t_max_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2065 	bp->stats.mac_tvx_capability				= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2066 	bp->stats.mac_available_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2067 	bp->stats.mac_current_path					= bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2068 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2069 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2070 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2071 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2072 	bp->stats.mac_dup_address_test				= bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2073 	bp->stats.mac_requested_paths				= bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2074 	bp->stats.mac_downstream_port_type			= bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2075 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2076 	bp->stats.mac_t_req							= bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2077 	bp->stats.mac_t_neg							= bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2078 	bp->stats.mac_t_max							= bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2079 	bp->stats.mac_tvx_value						= bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2080 	bp->stats.mac_frame_error_threshold			= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2081 	bp->stats.mac_frame_error_ratio				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2082 	bp->stats.mac_rmt_state						= bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2083 	bp->stats.mac_da_flag						= bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2084 	bp->stats.mac_una_da_flag					= bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2085 	bp->stats.mac_frame_error_flag				= bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2086 	bp->stats.mac_ma_unitdata_available			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2087 	bp->stats.mac_hardware_present				= bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2088 	bp->stats.mac_ma_unitdata_enable			= bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2089 	bp->stats.path_tvx_lower_bound				= bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2090 	bp->stats.path_t_max_lower_bound			= bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2091 	bp->stats.path_max_t_req					= bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2092 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2093 	bp->stats.port_my_type[0]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2094 	bp->stats.port_my_type[1]					= bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2095 	bp->stats.port_neighbor_type[0]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2096 	bp->stats.port_neighbor_type[1]				= bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2097 	bp->stats.port_connection_policies[0]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2098 	bp->stats.port_connection_policies[1]		= bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2099 	bp->stats.port_mac_indicated[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2100 	bp->stats.port_mac_indicated[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2101 	bp->stats.port_current_path[0]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2102 	bp->stats.port_current_path[1]				= bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2103 	memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2104 	memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2105 	bp->stats.port_mac_placement[0]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2106 	bp->stats.port_mac_placement[1]				= bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2107 	bp->stats.port_available_paths[0]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2108 	bp->stats.port_available_paths[1]			= bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2109 	bp->stats.port_pmd_class[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2110 	bp->stats.port_pmd_class[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2111 	bp->stats.port_connection_capabilities[0]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2112 	bp->stats.port_connection_capabilities[1]	= bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2113 	bp->stats.port_bs_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2114 	bp->stats.port_bs_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2115 	bp->stats.port_ler_estimate[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2116 	bp->stats.port_ler_estimate[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2117 	bp->stats.port_ler_cutoff[0]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2118 	bp->stats.port_ler_cutoff[1]				= bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2119 	bp->stats.port_ler_alarm[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2120 	bp->stats.port_ler_alarm[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2121 	bp->stats.port_connect_state[0]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2122 	bp->stats.port_connect_state[1]				= bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2123 	bp->stats.port_pcm_state[0]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2124 	bp->stats.port_pcm_state[1]					= bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2125 	bp->stats.port_pc_withhold[0]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2126 	bp->stats.port_pc_withhold[1]				= bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2127 	bp->stats.port_ler_flag[0]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2128 	bp->stats.port_ler_flag[1]					= bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2129 	bp->stats.port_hardware_present[0]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2130 	bp->stats.port_hardware_present[1]			= bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2131 
2132 	/* Get FDDI counters */
2133 
2134 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2135 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2136 		return (struct net_device_stats *)&bp->stats;
2137 
2138 	/* Fill the bp->stats structure with the FDDI counter values */
2139 
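	/* Note: only the .ls word of each counter is copied into the statistics */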
2140 	bp->stats.mac_frame_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2141 	bp->stats.mac_copied_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2142 	bp->stats.mac_transmit_cts			= bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2143 	bp->stats.mac_error_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2144 	bp->stats.mac_lost_cts				= bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2145 	bp->stats.port_lct_fail_cts[0]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2146 	bp->stats.port_lct_fail_cts[1]		= bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2147 	bp->stats.port_lem_reject_cts[0]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2148 	bp->stats.port_lem_reject_cts[1]	= bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2149 	bp->stats.port_lem_cts[0]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2150 	bp->stats.port_lem_cts[1]			= bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2151 
2152 	return (struct net_device_stats *)&bp->stats;
2153 	}
2154 
2155 
2156 /*
2157  * ==============================
2158  * = dfx_ctl_set_multicast_list =
2159  * ==============================
2160  *
2161  * Overview:
2162  *   Enable/Disable LLC frame promiscuous mode reception
2163  *   on the adapter and/or update multicast address table.
2164  *
2165  * Returns:
2166  *   None
2167  *
2168  * Arguments:
2169  *   dev - pointer to device information
2170  *
2171  * Functional Description:
2172  *   This routine follows a fairly simple algorithm for setting the
2173  *   adapter filters and CAM:
2174  *
2175  *		if IFF_PROMISC flag is set
2176  *			enable LLC individual/group promiscuous mode
2177  *		else
2178  *			disable LLC individual/group promiscuous mode
2179  *			if number of incoming multicast addresses >
2180  *					(CAM max size - number of unicast addresses in CAM)
2181  *				enable LLC group promiscuous mode
2182  *				set driver-maintained multicast address count to zero
2183  *			else
2184  *				disable LLC group promiscuous mode
2185  *				set driver-maintained multicast address count to incoming count
2186  *			update adapter CAM
2187  *		update adapter filters
2188  *
2189  * Return Codes:
2190  *   None
2191  *
2192  * Assumptions:
2193  *   Multicast addresses are presented in canonical (LSB) format.
2194  *
2195  * Side Effects:
2196  *   On-board adapter CAM and filters are updated.
2197  */
2198 
2199 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2200 {
2201 	DFX_board_t *bp = netdev_priv(dev);
2202 	int					i;			/* used as index in for loop */
2203 	struct netdev_hw_addr *ha;
2204 
2205 	/* Enable LLC frame promiscuous mode, if necessary */
2206 
2207 	if (dev->flags & IFF_PROMISC)
2208 		bp->ind_group_prom = PI_FSTATE_K_PASS;		/* Enable LLC ind/group prom mode */
2209 
2210 	/* Else, update multicast address table */
2211 
2212 	else
2213 		{
2214 		bp->ind_group_prom = PI_FSTATE_K_BLOCK;		/* Disable LLC ind/group prom mode */
2215 		/*
2216 		 * Check whether incoming multicast address count exceeds table size
2217 		 *
2218 		 * Note: The adapters utilize an on-board 64 entry CAM to
2219 		 *       support perfect filtering of multicast packets and
2220 		 *       bridge functions when unicast addresses are added.
2221 		 *		 There is no hash function available.  To support
2222 		 *		 additional multicast addresses, the all multicast
2223 		 *		 filter (LLC group promiscuous mode) must be enabled.
2224 		 *
2225 		 *		 The firmware reserves two CAM entries for SMT-related
2226 		 *		 multicast addresses, which leaves 62 entries available.
2227 		 *		 The following code ensures that we're not being asked
2228 		 *		 to add more than 62 addresses to the CAM.  If we are,
2229 		 *		 the driver will enable the all multicast filter.
2230 		 *		 Should the number of multicast addresses drop below
2231 		 *		 the high water mark, the filter will be disabled and
2232 		 *		 perfect filtering will be used.
2233 		 */
2234 
2235 		if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2236 			{
2237 			bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2238 			bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2239 			}
2240 		else
2241 			{
2242 			bp->group_prom	= PI_FSTATE_K_BLOCK;	/* Disable LLC group prom mode */
2243 			bp->mc_count	= netdev_mc_count(dev);		/* Add mc addrs to CAM */
2244 			}
2245 
2246 		/* Copy addresses to multicast address table, then update adapter CAM */
2247 
2248 		i = 0;
2249 		netdev_for_each_mc_addr(ha, dev)
2250 			memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2251 			       ha->addr, FDDI_K_ALEN);
2252 
2253 		if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2254 			{
2255 			DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2256 			}
2257 		else
2258 			{
2259 			DBG_printk("%s: Multicast address table updated!  Added %d addresses.\n", dev->name, bp->mc_count);
2260 			}
2261 		}
2262 
2263 	/* Update adapter filters */
2264 
2265 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2266 		{
2267 		DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2268 		}
2269 	else
2270 		{
2271 		DBG_printk("%s: Adapter filters updated!\n", dev->name);
2272 		}
2273 	}
2274 
2275 
2276 /*
2277  * ===========================
2278  * = dfx_ctl_set_mac_address =
2279  * ===========================
2280  *
2281  * Overview:
2282  *   Add node address override (unicast address) to adapter
2283  *   CAM and update dev_addr field in device table.
2284  *
2285  * Returns:
2286  *   None
2287  *
2288  * Arguments:
2289  *   dev  - pointer to device information
2290  *   addr - pointer to sockaddr structure containing unicast address to add
2291  *
2292  * Functional Description:
2293  *   The adapter supports node address overrides by adding one or more
2294  *   unicast addresses to the adapter CAM.  This is similar to adding
2295  *   multicast addresses.  In this routine we'll update the driver and
2296  *   device structures with the new address, then update the adapter CAM
2297  *   to ensure that the adapter will copy and strip frames destined and
2298  *   sourced by that address.
2299  *
2300  * Return Codes:
2301  *   Always returns zero.
2302  *
2303  * Assumptions:
2304  *   The address pointed to by addr->sa_data is a valid unicast
2305  *   address and is presented in canonical (LSB) format.
2306  *
2307  * Side Effects:
2308  *   On-board adapter CAM is updated.  On-board adapter filters
2309  *   may be updated.
2310  */
2311 
2312 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2313 	{
2314 	struct sockaddr	*p_sockaddr = (struct sockaddr *)addr;
2315 	DFX_board_t *bp = netdev_priv(dev);
2316 
2317 	/* Copy unicast address to driver-maintained structs and update count */
2318 
2319 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);	/* update device struct */
2320 	memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);	/* update driver struct */
2321 	bp->uc_count = 1;
2322 
2323 	/*
2324 	 * Verify we're not exceeding the CAM size by adding unicast address
2325 	 *
2326 	 * Note: It's possible that before entering this routine we've
2327 	 *       already filled the CAM with 62 multicast addresses.
2328 	 *		 Since we need to place the node address override into
2329 	 *		 the CAM, we have to check to see that we're not
2330 	 *		 exceeding the CAM size.  If we are, we have to enable
2331 	 *		 the LLC group (multicast) promiscuous mode filter as
2332 	 *		 in dfx_ctl_set_multicast_list.
2333 	 */
2334 
2335 	if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2336 		{
2337 		bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
2338 		bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
2339 
2340 		/* Update adapter filters */
2341 
2342 		if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2343 			{
2344 			DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2345 			}
2346 		else
2347 			{
2348 			DBG_printk("%s: Adapter filters updated!\n", dev->name);
2349 			}
2350 		}
2351 
2352 	/* Update adapter CAM with new unicast address */
2353 
2354 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2355 		{
2356 		DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2357 		}
2358 	else
2359 		{
2360 		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2361 		}
2362 	return 0;			/* always return zero */
2363 	}
2364 
2365 
2366 /*
2367  * ======================
2368  * = dfx_ctl_update_cam =
2369  * ======================
2370  *
2371  * Overview:
2372  *   Procedure to update adapter CAM (Content Addressable Memory)
2373  *   with desired unicast and multicast address entries.
2374  *
2375  * Returns:
2376  *   Condition code
2377  *
2378  * Arguments:
2379  *   bp - pointer to board information
2380  *
2381  * Functional Description:
2382  *   Updates adapter CAM with current contents of board structure
2383  *   unicast and multicast address tables.  Since there are only 62
2384  *   free entries in CAM, this routine ensures that the command
2385  *   request buffer is not overrun.
2386  *
2387  * Return Codes:
2388  *   DFX_K_SUCCESS - Request succeeded
2389  *   DFX_K_FAILURE - Request failed
2390  *
2391  * Assumptions:
2392  *   All addresses being added (unicast and multicast) are in canonical
2393  *   order.
2394  *
2395  * Side Effects:
2396  *   On-board adapter CAM is updated.
2397  */
2398 
2399 static int dfx_ctl_update_cam(DFX_board_t *bp)
2400 	{
2401 	int			i;				/* used as index */
2402 	PI_LAN_ADDR	*p_addr;		/* pointer to CAM entry */
2403 
2404 	/*
2405 	 * Fill in command request information
2406 	 *
2407 	 * Note: Even though both the unicast and multicast address
2408 	 *       table entries are stored as contiguous 6 byte entries,
2409 	 *		 the firmware address filter set command expects each
2410 	 *		 entry to be two longwords (8 bytes total).  We must be
2411 	 *		 careful to only copy the six bytes of each unicast and
2412 	 *		 multicast table entry into each command entry.  This
2413 	 *		 is also why we must first clear the entire command
2414 	 *		 request buffer.
2415 	 */
2416 
2417 	memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);	/* first clear buffer */
2418 	bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2419 	p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2420 
2421 	/* Now add unicast addresses to command request buffer, if any */
2422 
2423 	for (i=0; i < (int)bp->uc_count; i++)
2424 		{
2425 		if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2426 			{
2427 			memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2428 			p_addr++;			/* point to next command entry */
2429 			}
2430 		}
2431 
2432 	/* Now add multicast addresses to command request buffer, if any */
2433 
2434 	for (i=0; i < (int)bp->mc_count; i++)
2435 		{
2436 		if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2437 			{
2438 			memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2439 			p_addr++;			/* point to next command entry */
2440 			}
2441 		}
2442 
2443 	/* Issue command to update adapter CAM, then return */
2444 
2445 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2446 		return DFX_K_FAILURE;
2447 	return DFX_K_SUCCESS;
2448 	}
2449 
2450 
2451 /*
2452  * ==========================
2453  * = dfx_ctl_update_filters =
2454  * ==========================
2455  *
2456  * Overview:
2457  *   Procedure to update adapter filters with desired
2458  *   filter settings.
2459  *
2460  * Returns:
2461  *   Condition code
2462  *
2463  * Arguments:
2464  *   bp - pointer to board information
2465  *
2466  * Functional Description:
2467  *   Enables or disables filter using current filter settings.
2468  *
2469  * Return Codes:
2470  *   DFX_K_SUCCESS - Request succeeded.
2471  *   DFX_K_FAILURE - Request failed.
2472  *
2473  * Assumptions:
2474  *   We must always pass up packets destined to the broadcast
2475  *   address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2476  *   broadcast filter enabled.
2477  *
2478  * Side Effects:
2479  *   On-board adapter filters are updated.
2480  */
2481 
2482 static int dfx_ctl_update_filters(DFX_board_t *bp)
2483 	{
2484 	int	i = 0;					/* used as index */
2485 
2486 	/* Fill in command request information */
2487 
2488 	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2489 
2490 	/* Initialize Broadcast filter - * ALWAYS ENABLED * */
2491 
2492 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_BROADCAST;
2493 	bp->cmd_req_virt->filter_set.item[i++].value	= PI_FSTATE_K_PASS;
2494 
2495 	/* Initialize LLC Individual/Group Promiscuous filter */
2496 
2497 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_IND_GROUP_PROM;
2498 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->ind_group_prom;
2499 
2500 	/* Initialize LLC Group Promiscuous filter */
2501 
2502 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_GROUP_PROM;
2503 	bp->cmd_req_virt->filter_set.item[i++].value	= bp->group_prom;
2504 
2505 	/* Terminate the item code list */
2506 
2507 	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_EOL;
2508 
2509 	/* Issue command to update adapter filters, then return */
2510 
2511 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2512 		return DFX_K_FAILURE;
2513 	return DFX_K_SUCCESS;
2514 	}
2515 
2516 
2517 /*
2518  * ======================
2519  * = dfx_hw_dma_cmd_req =
2520  * ======================
2521  *
2522  * Overview:
2523  *   Sends PDQ DMA command to adapter firmware
2524  *
2525  * Returns:
2526  *   Condition code
2527  *
2528  * Arguments:
2529  *   bp - pointer to board information
2530  *
2531  * Functional Description:
2532  *   The command request and response buffers are posted to the adapter in the manner
2533  *   described in the PDQ Port Specification:
2534  *
2535  *		1. Command Response Buffer is posted to adapter.
2536  *		2. Command Request Buffer is posted to adapter.
2537  *		3. Command Request consumer index is polled until it indicates that request
2538  *         buffer has been DMA'd to adapter.
2539  *		4. Command Response consumer index is polled until it indicates that response
2540  *         buffer has been DMA'd from adapter.
2541  *
2542  *   This ordering ensures that a response buffer is already available for the firmware
2543  *   to use once it's done processing the request buffer.
2544  *
2545  * Return Codes:
2546  *   DFX_K_SUCCESS	  - DMA command succeeded
2547  *   DFX_K_OUTSTATE   - Adapter is NOT in proper state
2548  *   DFX_K_HW_TIMEOUT - DMA command timed out
2549  *
2550  * Assumptions:
2551  *   Command request buffer has already been filled with desired DMA command.
2552  *
2553  * Side Effects:
2554  *   None
2555  */
2556 
2557 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2558 	{
2559 	int status;			/* adapter status */
2560 	int timeout_cnt;	/* used in for loops */
2561 
2562 	/* Make sure the adapter is in a state that we can issue the DMA command in */
2563 
2564 	status = dfx_hw_adap_state_rd(bp);
2565 	if ((status == PI_STATE_K_RESET)		||
2566 		(status == PI_STATE_K_HALTED)		||
2567 		(status == PI_STATE_K_DMA_UNAVAIL)	||
2568 		(status == PI_STATE_K_UPGRADE))
2569 		return DFX_K_OUTSTATE;
2570 
2571 	/* Put response buffer on the command response queue */
2572 
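	/*
	 * long_0 of the descriptor carries the SOP flag and the buffer
	 * segment length (in PI_ALIGN_K_CMD_RSP_BUFF units); long_1 carries
	 * the physical address of the response buffer.
	 */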
2573 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2574 			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2575 	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2576 
2577 	/* Bump (and wrap) the producer index and write out to register */
2578 
2579 	bp->cmd_rsp_reg.index.prod += 1;
2580 	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2581 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2582 
2583 	/* Put request buffer on the command request queue */
2584 
2585 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2586 			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2587 	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2588 
2589 	/* Bump (and wrap) the producer index and write out to register */
2590 
2591 	bp->cmd_req_reg.index.prod += 1;
2592 	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2593 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2594 
2595 	/*
2596 	 * Here we wait for the command request consumer index to be equal
2597 	 * to the producer, indicating that the adapter has DMAed the request.
2598 	 */
2599 
2600 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2601 		{
2602 		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2603 			break;
2604 		udelay(100);			/* wait for 100 microseconds */
2605 		}
2606 	if (timeout_cnt == 0)
2607 		return DFX_K_HW_TIMEOUT;
2608 
2609 	/* Bump (and wrap) the completion index and write out to register */
2610 
2611 	bp->cmd_req_reg.index.comp += 1;
2612 	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2613 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2614 
2615 	/*
2616 	 * Here we wait for the command response consumer index to be equal
2617 	 * to the producer, indicating that the adapter has DMAed the response.
2618 	 */
2619 
2620 	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2621 		{
2622 		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2623 			break;
2624 		udelay(100);			/* wait for 100 microseconds */
2625 		}
2626 	if (timeout_cnt == 0)
2627 		return DFX_K_HW_TIMEOUT;
2628 
2629 	/* Bump (and wrap) the completion index and write out to register */
2630 
2631 	bp->cmd_rsp_reg.index.comp += 1;
2632 	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2633 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2634 	return DFX_K_SUCCESS;
2635 	}
2636 
2637 
2638 /*
2639  * ========================
2640  * = dfx_hw_port_ctrl_req =
2641  * ========================
2642  *
2643  * Overview:
2644  *   Sends PDQ port control command to adapter firmware
2645  *
2646  * Returns:
2647  *   Condition code (host data register value returned via host_data if not NULL)
2648  *
2649  * Arguments:
2650  *   bp			- pointer to board information
2651  *	 command	- port control command
2652  *	 data_a		- port data A register value
2653  *	 data_b		- port data B register value
2654  *	 host_data	- ptr to host data register value
2655  *
2656  * Functional Description:
2657  *   Send generic port control command to adapter by writing
2658  *   to various PDQ port registers, then polling for completion.
2659  *
2660  * Return Codes:
2661  *   DFX_K_SUCCESS	  - port control command succeeded
2662  *   DFX_K_HW_TIMEOUT - port control command timed out
2663  *
2664  * Assumptions:
2665  *   None
2666  *
2667  * Side Effects:
2668  *   None
2669  */
2670 
2671 static int dfx_hw_port_ctrl_req(
2672 	DFX_board_t	*bp,
2673 	PI_UINT32	command,
2674 	PI_UINT32	data_a,
2675 	PI_UINT32	data_b,
2676 	PI_UINT32	*host_data
2677 	)
2678 
2679 	{
2680 	PI_UINT32	port_cmd;		/* Port Control command register value */
2681 	int			timeout_cnt;	/* used in for loops */
2682 
2683 	/* Set Command Error bit in command longword */
2684 
2685 	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2686 
2687 	/* Issue port command to the adapter */
2688 
2689 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2690 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2691 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2692 
2693 	/* Now wait for command to complete */
2694 
2695 	if (command == PI_PCTRL_M_BLAST_FLASH)
2696 		timeout_cnt = 600000;	/* set command timeout count to 60 seconds */
2697 	else
2698 		timeout_cnt = 20000;	/* set command timeout count to 2 seconds */
2699 
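	/*
	 * Poll the Port Control register until the Command Error bit reads
	 * clear (command complete), waiting 100 microseconds per iteration.
	 */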
2700 	for (; timeout_cnt > 0; timeout_cnt--)
2701 		{
2702 		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2703 		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2704 			break;
2705 		udelay(100);			/* wait for 100 microseconds */
2706 		}
2707 	if (timeout_cnt == 0)
2708 		return DFX_K_HW_TIMEOUT;
2709 
2710 	/*
2711 	 * If the caller supplied a non-NULL host_data pointer, return the
2712 	 * contents of the HOST_DATA register through it; otherwise the
2713 	 * register is not read.
2714 	 */
2715 
2716 	if (host_data != NULL)
2717 		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2718 	return DFX_K_SUCCESS;
2719 	}
2720 
2721 
2722 /*
2723  * =====================
2724  * = dfx_hw_adap_reset =
2725  * =====================
2726  *
2727  * Overview:
2728  *   Resets adapter
2729  *
2730  * Returns:
2731  *   None
2732  *
2733  * Arguments:
2734  *   bp   - pointer to board information
2735  *   type - type of reset to perform
2736  *
2737  * Functional Description:
2738  *   Issue soft reset to adapter by writing to PDQ Port Reset
2739  *   register.  Use incoming reset type to tell adapter what
2740  *   kind of reset operation to perform.
2741  *
2742  * Return Codes:
2743  *   None
2744  *
2745  * Assumptions:
2746  *   This routine merely issues a soft reset to the adapter.
2747  *   It is expected that after this routine returns, the caller
2748  *   will appropriately poll the Port Status register for the
2749  *   adapter to enter the proper state.
2750  *
2751  * Side Effects:
2752  *   Internal adapter registers are cleared.
2753  */
2754 
2755 static void dfx_hw_adap_reset(
2756 	DFX_board_t	*bp,
2757 	PI_UINT32	type
2758 	)
2759 
2760 	{
2761 	/* Set Reset type and assert reset */
2762 
2763 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);	/* tell adapter type of reset */
2764 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2765 
2766 	/* Wait for at least 1 microsecond per the spec; we wait 20 just to be safe */
2767 
2768 	udelay(20);
2769 
2770 	/* Deassert reset */
2771 
2772 	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2773 	}
2774 
2775 
2776 /*
2777  * ========================
2778  * = dfx_hw_adap_state_rd =
2779  * ========================
2780  *
2781  * Overview:
2782  *   Returns current adapter state
2783  *
2784  * Returns:
2785  *   Adapter state per PDQ Port Specification
2786  *
2787  * Arguments:
2788  *   bp - pointer to board information
2789  *
2790  * Functional Description:
2791  *   Reads PDQ Port Status register and returns adapter state.
2792  *
2793  * Return Codes:
2794  *   None
2795  *
2796  * Assumptions:
2797  *   None
2798  *
2799  * Side Effects:
2800  *   None
2801  */
2802 
2803 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2804 	{
2805 	PI_UINT32 port_status;		/* Port Status register value */
2806 
2807 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2808 	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2809 	}
2810 
2811 
2812 /*
2813  * =====================
2814  * = dfx_hw_dma_uninit =
2815  * =====================
2816  *
2817  * Overview:
2818  *   Brings adapter to DMA_UNAVAILABLE state
2819  *
2820  * Returns:
2821  *   Condition code
2822  *
2823  * Arguments:
2824  *   bp   - pointer to board information
2825  *   type - type of reset to perform
2826  *
2827  * Functional Description:
2828  *   Bring adapter to DMA_UNAVAILABLE state by performing the following:
2829  *		1. Set reset type bit in Port Data A Register then reset adapter.
2830  *		2. Check that adapter is in DMA_UNAVAILABLE state.
2831  *
2832  * Return Codes:
2833  *   DFX_K_SUCCESS	  - adapter is in DMA_UNAVAILABLE state
2834  *   DFX_K_HW_TIMEOUT - adapter did not reset properly
2835  *
2836  * Assumptions:
2837  *   None
2838  *
2839  * Side Effects:
2840  *   Internal adapter registers are cleared.
2841  */
2842 
2843 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2844 	{
2845 	int timeout_cnt;	/* used in for loops */
2846 
2847 	/* Set reset type bit and reset adapter */
2848 
2849 	dfx_hw_adap_reset(bp, type);
2850 
2851 	/* Now wait for adapter to enter DMA_UNAVAILABLE state */
2852 
2853 	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2854 		{
2855 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2856 			break;
2857 		udelay(100);					/* wait for 100 microseconds */
2858 		}
2859 	if (timeout_cnt == 0)
2860 		return DFX_K_HW_TIMEOUT;
2861 	return DFX_K_SUCCESS;
2862 	}
2863 
2864 /*
2865  *	Align the data area of an sk_buff to an n-byte boundary,
2866  *	where n must be a power of 2
2867  */
2868 #ifdef DYNAMIC_BUFFERS
2869 static void my_skb_align(struct sk_buff *skb, int n)
2870 {
2871 	unsigned long x = (unsigned long)skb->data;
2872 	unsigned long v;
2873 
2874 	v = ALIGN(x, n);	/* Where we want to be */
2875 
2876 	skb_reserve(skb, v - x);
2877 }
2878 #endif
2879 
2880 /*
2881  * ================
2882  * = dfx_rcv_init =
2883  * ================
2884  *
2885  * Overview:
2886  *   Produces buffers to adapter LLC Host receive descriptor block
2887  *
2888  * Returns:
2889  *   None
2890  *
2891  * Arguments:
2892  *   bp - pointer to board information
2893  *   get_buffers - non-zero if buffers to be allocated
2894  *
2895  * Functional Description:
2896  *   This routine can be called during dfx_adap_init() or during an adapter
2897  *	 reset.  It initializes the descriptor block and produces all allocated
2898  *   LLC Host queue receive buffers.
2899  *
2900  * Return Codes:
2901  *   Returns 0 on success or -ENOMEM if a buffer allocation failed (only
2902  *   possible with dynamic buffer allocation).  On failure, buffers that were
2903  *   already allocated are not released; releasing them is the caller's
2904  *   responsibility.
2905  *
2906  * Assumptions:
2907  *   The PDQ has been reset and the adapter and driver maintained Type 2
2908  *   register indices are cleared.
2909  *
2910  * Side Effects:
2911  *   Receive buffers are posted to the adapter LLC queue and the adapter
2912  *   is notified.
2913  */
2914 
2915 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2916 	{
2917 	int	i, j;					/* used in for loop */
2918 
2919 	/*
2920 	 *  Since each receive buffer is a single fragment of the same length, initialize
2921 	 *  first longword in each receive descriptor for entire LLC Host descriptor
2922 	 *  block.  Also initialize second longword in each receive descriptor with
2923 	 *  physical address of receive buffer.  We'll always allocate receive
2924 	 *  buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2925 	 *  block and produce new receive buffers by simply updating the receive
2926 	 *  producer index.
2927 	 *
2928 	 * 	Assumptions:
2929 	 *		To support all shipping versions of PDQ, the receive buffer size
2930 	 *		must be a multiple of 128 bytes and the physical address must be 128 byte
2931 	 *		aligned.  In other words, bits 0-6 of the length and address must
2932 	 *		be zero for the following descriptor field entries to be correct on
2933 	 *		all PDQ-based boards.  We guaranteed both requirements during
2934 	 *		driver initialization when we allocated memory for the receive buffers.
2935 	 */
2936 
2937 	if (get_buffers) {
2938 #ifdef DYNAMIC_BUFFERS
2939 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2940 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2941 		{
2942 			struct sk_buff *newskb;
2943 			dma_addr_t dma_addr;
2944 
2945 			newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
2946 						    GFP_NOIO);
2947 			if (!newskb)
2948 				return -ENOMEM;
2949 			/*
2950 			 * align to 128 bytes for compatibility with
2951 			 * the old EISA boards.
2952 			 */
2953 
2954 			my_skb_align(newskb, 128);
2955 			dma_addr = dma_map_single(bp->bus_dev,
2956 						  newskb->data,
2957 						  PI_RCV_DATA_K_SIZE_MAX,
2958 						  DMA_FROM_DEVICE);
2959 			if (dma_mapping_error(bp->bus_dev, dma_addr)) {
2960 				dev_kfree_skb(newskb);
2961 				return -ENOMEM;
2962 			}
2963 			bp->descr_block_virt->rcv_data[i + j].long_0 =
2964 				(u32)(PI_RCV_DESCR_M_SOP |
2965 				      ((PI_RCV_DATA_K_SIZE_MAX /
2966 					PI_ALIGN_K_RCV_DATA_BUFF) <<
2967 				       PI_RCV_DESCR_V_SEG_LEN));
2968 			bp->descr_block_virt->rcv_data[i + j].long_1 =
2969 				(u32)dma_addr;
2970 
2971 			/*
2972 			 * p_rcv_buff_va is only used inside the
2973 			 * kernel so we put the skb pointer here.
2974 			 */
2975 			bp->p_rcv_buff_va[i+j] = (char *) newskb;
2976 		}
2977 #else
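	/*
	 * Without dynamic buffers, the rcv_bufs_to_post receive buffers are
	 * slices of one contiguous block allocated at driver initialization;
	 * buffer i is reused at every rcv_bufs_to_post-th descriptor entry so
	 * that the whole descriptor block is filled.
	 */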
2978 	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2979 		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2980 			{
2981 			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2982 				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2983 			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2984 			bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2985 			}
2986 #endif
2987 	}
2988 
2989 	/* Update receive producer and Type 2 register */
2990 
2991 	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2992 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2993 	return 0;
2994 	}
2995 
2996 
2997 /*
2998  * =========================
2999  * = dfx_rcv_queue_process =
3000  * =========================
3001  *
3002  * Overview:
3003  *   Process received LLC frames.
3004  *
3005  * Returns:
3006  *   None
3007  *
3008  * Arguments:
3009  *   bp - pointer to board information
3010  *
3011  * Functional Description:
3012  *   Received LLC frames are processed until there are no more consumed frames.
3013  *   Once all frames are processed, the receive buffers are returned to the
3014  *   adapter.  Note that this algorithm bounds the time that can be spent
3015  *   in this routine, because there is a fixed number of receive buffers to
3016  *   process and buffers are not produced until this routine exits and returns
3017  *   to the ISR.
3018  *
3019  * Return Codes:
3020  *   None
3021  *
3022  * Assumptions:
3023  *   None
3024  *
3025  * Side Effects:
3026  *   None
3027  */
3028 
3029 static void dfx_rcv_queue_process(
3030 	DFX_board_t *bp
3031 	)
3032 
3033 	{
3034 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3035 	char				*p_buff;			/* ptr to start of packet receive buffer (FMC descriptor) */
3036 	u32					descr, pkt_len;		/* FMC descriptor field and packet length */
3037 	struct sk_buff		*skb = NULL;			/* pointer to a sk_buff to hold incoming packet data */
3038 
3039 	/* Service all consumed LLC receive frames */
3040 
3041 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3042 	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3043 		{
3044 		/* Process any errors */
3045 		dma_addr_t dma_addr;
3046 		int entry;
3047 
3048 		entry = bp->rcv_xmt_reg.index.rcv_comp;
3049 #ifdef DYNAMIC_BUFFERS
3050 		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3051 #else
3052 		p_buff = bp->p_rcv_buff_va[entry];
3053 #endif
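		/*
		 * The adapter deposits the FMC receive descriptor (frame status
		 * and length) at offset RCV_BUFF_K_DESCR in the buffer; sync
		 * that longword for the CPU and copy it out before examining it.
		 */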
3054 		dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
3055 		dma_sync_single_for_cpu(bp->bus_dev,
3056 					dma_addr + RCV_BUFF_K_DESCR,
3057 					sizeof(u32),
3058 					DMA_FROM_DEVICE);
3059 		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3060 
3061 		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3062 			{
3063 			if (descr & PI_FMC_DESCR_M_RCC_CRC)
3064 				bp->rcv_crc_errors++;
3065 			else
3066 				bp->rcv_frame_status_errors++;
3067 			}
3068 		else
3069 		{
3070 			int rx_in_place = 0;
3071 
3072 			/* The frame was received without errors - verify packet length */
3073 
3074 			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3075 			pkt_len -= 4;				/* subtract 4 byte CRC */
3076 			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3077 				bp->rcv_length_errors++;
3078 			else{
3079 #ifdef DYNAMIC_BUFFERS
3080 				struct sk_buff *newskb = NULL;
3081 
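				/*
				 * For frames larger than the copybreak threshold,
				 * try to hand the receive buffer itself up the
				 * stack and replace it with a freshly mapped skb;
				 * for small frames, or if the allocation or
				 * mapping fails, fall back to copying the data
				 * into a new skb below.
				 */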
3082 				if (pkt_len > SKBUFF_RX_COPYBREAK) {
3083 					dma_addr_t new_dma_addr;
3084 
3085 					newskb = netdev_alloc_skb(bp->dev,
3086 								  NEW_SKB_SIZE);
3087 					if (newskb){
3088 						my_skb_align(newskb, 128);
3089 						new_dma_addr = dma_map_single(
3090 								bp->bus_dev,
3091 								newskb->data,
3092 								PI_RCV_DATA_K_SIZE_MAX,
3093 								DMA_FROM_DEVICE);
3094 						if (dma_mapping_error(
3095 								bp->bus_dev,
3096 								new_dma_addr)) {
3097 							dev_kfree_skb(newskb);
3098 							newskb = NULL;
3099 						}
3100 					}
3101 					if (newskb) {
3102 						rx_in_place = 1;
3103 
3104 						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3105 						dma_unmap_single(bp->bus_dev,
3106 							dma_addr,
3107 							PI_RCV_DATA_K_SIZE_MAX,
3108 							DMA_FROM_DEVICE);
3109 						skb_reserve(skb, RCV_BUFF_K_PADDING);
3110 						bp->p_rcv_buff_va[entry] = (char *)newskb;
3111 						bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
3112 					}
3113 				}
3114 				if (!newskb)
3115 #endif
3116 					/* Alloc new buffer to pass up,
3117 					 * add room for PRH. */
3118 					skb = netdev_alloc_skb(bp->dev,
3119 							       pkt_len + 3);
3120 				if (skb == NULL)
3121 					{
3122 					printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
3123 					bp->rcv_discards++;
3124 					break;
3125 					}
3126 				else {
3127 					if (!rx_in_place) {
3128 						/* Receive buffer allocated, pass receive packet up */
3129 						dma_sync_single_for_cpu(
3130 							bp->bus_dev,
3131 							dma_addr +
3132 							RCV_BUFF_K_PADDING,
3133 							pkt_len + 3,
3134 							DMA_FROM_DEVICE);
3135 
3136 						skb_copy_to_linear_data(skb,
3137 							       p_buff + RCV_BUFF_K_PADDING,
3138 							       pkt_len + 3);
3139 					}
3140 
3141 					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
3142 					skb_put(skb, pkt_len);		/* pass up packet length, NOT including CRC */
3143 					skb->protocol = fddi_type_trans(skb, bp->dev);
3144 					bp->rcv_total_bytes += skb->len;
3145 					netif_rx(skb);
3146 
3147 					/* Update the rcv counters */
3148 					bp->rcv_total_frames++;
3149 					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3150 						bp->rcv_multicast_frames++;
3151 				}
3152 			}
3153 			}
3154 
3155 		/*
3156 		 * Advance the producer (for recycling) and advance the completion
3157 		 * (for servicing received frames).  Note that it is okay to
3158 		 * advance the producer without checking that it passes the
3159 		 * completion index because they are both advanced at the same
3160 		 * rate.
3161 		 */
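		/*
		 * Note: the rcv_prod and rcv_comp fields are assumed to be
		 * 8-bit quantities sharing one 32-bit longword with the
		 * transmit indices (compare the u8 index locals used for the
		 * transmit ring below), so the "+= 1" updates below wrap
		 * automatically without an explicit modulo.
		 */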
3162 
3163 		bp->rcv_xmt_reg.index.rcv_prod += 1;
3164 		bp->rcv_xmt_reg.index.rcv_comp += 1;
3165 		}
3166 	}
3167 
3168 
3169 /*
3170  * =====================
3171  * = dfx_xmt_queue_pkt =
3172  * =====================
3173  *
3174  * Overview:
3175  *   Queues packets for transmission
3176  *
3177  * Returns:
3178  *   Condition code
3179  *
3180  * Arguments:
3181  *   skb - pointer to sk_buff to queue for transmission
3182  *   dev - pointer to device information
3183  *
3184  * Functional Description:
3185  *   Here we assume that an incoming skb transmit request
3186  *   is contained in a single physically contiguous buffer
3187  *   in which the virtual address of the start of the packet
3188  *   (skb->data) can be mapped to a DMA address
3189  *   by using dma_map_single().
3190  *
3191  *   Since the adapter architecture requires a three-byte
3192  *   packet request header (PRH) to be prepended to the start
3193  *   of the packet, we'll write that field immediately before
3194  *   the FC byte.  This is safe because we've ensured that
3195  *   dev->hard_header_len includes three pad bytes.  By posting
3196  *   a single fragment to the adapter, we reduce the number of
3197  *   descriptor fetches and the bus traffic needed to send the
3198  *   request.
3199  *
3200  *   Also, we can't free the skb until after it's been DMA'd
3201  *   out by the adapter, so we'll queue it in the driver and
3202  *   return it in dfx_xmt_done.
3203  *
3204  * Return Codes:
3205  *   NETDEV_TX_OK   - packet was queued, or was dropped (bad length or no link)
3206  *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3207  *
3208  * Assumptions:
3209  *	 First and foremost, we assume the incoming skb pointer
3210  *   is NOT NULL and is pointing to a valid sk_buff structure.
3211  *
3212  *   The outgoing packet is complete, starting with the
3213  *   frame control byte and running through the last byte of
3214  *   data, but NOT including the 4-byte CRC.  We'll let the
3215  *   adapter hardware generate and append the CRC.
3216  *
3217  *   The entire packet is stored in one physically
3218  *   contiguous buffer which is not cached and whose
3219  *   32-bit physical address can be determined.
3220  *
3221  *   It's vital that this routine is NOT reentered for the
3222  *   same board and that the OS is not in another section of
3223  *   code (e.g. dfx_int_common) for the same board on a
3224  *   different thread.
3225  *
3226  * Side Effects:
3227  *   None
3228  */
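/*
 * A rough sketch of the single fragment posted per packet, as implied by
 * the code below (the PRH byte values are defined elsewhere in this driver
 * from the MAC chip specification):
 *
 *   skb->data after skb_push(skb, 3):
 *
 *   +------+------+------+------+---------------------------+
 *   | PRH0 | PRH1 | PRH2 |  FC  |  LLC data (no CRC)        |
 *   +------+------+------+------+---------------------------+
 *
 *   descriptor long_0 = SOP | EOP | (skb->len << PI_XMT_DESCR_V_SEG_LEN)
 *   descriptor long_1 = 32-bit DMA address from dma_map_single()
 */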
3229 
3230 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3231 				     struct net_device *dev)
3232 	{
3233 	DFX_board_t		*bp = netdev_priv(dev);
3234 	u8			prod;				/* local transmit producer index */
3235 	PI_XMT_DESCR		*p_xmt_descr;		/* ptr to transmit descriptor block entry */
3236 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3237 	dma_addr_t		dma_addr;
3238 	unsigned long		flags;
3239 
3240 	netif_stop_queue(dev);
3241 
3242 	/*
3243 	 * Verify that incoming transmit request is OK
3244 	 *
3245 	 * Note: The packet size check is consistent with other
3246 	 *		 Linux device drivers, although the correct packet
3247 	 *		 size should be verified before calling the
3248 	 *		 transmit routine.
3249 	 */
3250 
3251 	if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3252 	{
3253 		printk("%s: Invalid packet length - %u bytes\n",
3254 			dev->name, skb->len);
3255 		bp->xmt_length_errors++;		/* bump error counter */
3256 		netif_wake_queue(dev);
3257 		dev_kfree_skb(skb);
3258 		return NETDEV_TX_OK;			/* return "success" */
3259 	}
3260 	/*
3261 	 * See if the adapter link is available; if not, free the buffer
3262 	 *
3263 	 * Note: If the link isn't available, free the buffer and return
3264 	 *		 NETDEV_TX_OK rather than telling the upper layer to
3265 	 *		 requeue the packet.  The reasoning is that by the time
3266 	 *		 the link becomes available, the packet to be sent will
3267 	 *		 be fairly stale.  By simply dropping the packet, the
3268 	 *		 higher layer protocols will eventually time out
3269 	 *		 waiting for response packets that they won't receive.
3270 	 */
3271 
3272 	if (bp->link_available == PI_K_FALSE)
3273 		{
3274 		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL)	/* is link really available? */
3275 			bp->link_available = PI_K_TRUE;		/* if so, set flag and continue */
3276 		else
3277 			{
3278 			bp->xmt_discards++;					/* bump error counter */
3279 			dev_kfree_skb(skb);		/* free sk_buff now */
3280 			netif_wake_queue(dev);
3281 			return NETDEV_TX_OK;		/* return "success" */
3282 			}
3283 		}
3284 
3285 	/* Write the three PRH bytes immediately before the FC byte */
3286 
3287 	skb_push(skb, 3);
3288 	skb->data[0] = DFX_PRH0_BYTE;	/* these byte values are defined */
3289 	skb->data[1] = DFX_PRH1_BYTE;	/* in the Motorola FDDI MAC chip */
3290 	skb->data[2] = DFX_PRH2_BYTE;	/* specification */
3291 
3292 	dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
3293 				  DMA_TO_DEVICE);
3294 	if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3295 		skb_pull(skb, 3);
3296 		return NETDEV_TX_BUSY;
3297 	}
3298 
3299 	spin_lock_irqsave(&bp->lock, flags);
3300 
3301 	/* Get the current producer and the next free xmt data descriptor */
3302 
3303 	prod		= bp->rcv_xmt_reg.index.xmt_prod;
3304 	p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3305 
3306 	/*
3307 	 * Get pointer to auxiliary queue entry to contain information
3308 	 * for this packet.
3309 	 *
3310 	 * Note: The current xmt producer index will become the
3311 	 *	 current xmt completion index when we complete this
3312 	 *	 packet later on.  So, we'll get the pointer to the
3313 	 *	 next auxiliary queue entry now before we bump the
3314 	 *	 producer index.
3315 	 */
3316 
3317 	p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);	/* also bump producer index */
3318 
3319 	/*
3320 	 * Write the descriptor with buffer info and bump producer
3321 	 *
3322 	 * Note: Since we need to start DMA from the packet request
3323 	 *		 header, we'll add 3 bytes to the DMA buffer length,
3324 	 *		 and we'll determine the physical address of the
3325 	 *		 buffer from the PRH, not skb->data.
3326 	 *
3327 	 * Assumptions:
3328 	 *		 1. Packet starts with the frame control (FC) byte
3329 	 *		    at skb->data.
3330 	 *		 2. The 4-byte CRC is not appended to the buffer or
3331 	 *			included in the length.
3332 	 *		 3. Packet length (skb->len) is from FC to end of
3333 	 *			data, inclusive.
3334 	 *		 4. The packet length does not exceed the maximum
3335 	 *			FDDI LLC frame length of 4491 bytes.
3336 	 *		 5. The entire packet is contained in a physically
3337 	 *			contiguous, non-cached, locked memory space
3338 	 *			comprised of a single buffer pointed to by
3339 	 *			skb->data.
3340 	 *		 6. The DMA address of the start of the packet
3341 	 *			can be determined from the virtual address
3342 	 *			by using dma_map_single() and is only 32 bits
3343 	 *			wide.
3344 	 */
3345 
3346 	p_xmt_descr->long_0	= (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3347 	p_xmt_descr->long_1 = (u32)dma_addr;
3348 
3349 	/*
3350 	 * Verify that descriptor is actually available
3351 	 *
3352 	 * Note: If the descriptor isn't available, return NETDEV_TX_BUSY,
3353 	 *	 which tells the upper layer to requeue the packet for
3354 	 *	 later transmission.
3355 	 *
3356 	 *       We need to ensure that the producer index never reaches the
3357 	 *	 completion index, except to indicate that the queue is empty.
3358 	 */
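	/*
	 * Restating the check below: "prod" was post-incremented above, so
	 * it names the next free slot.  If that slot equals the completion
	 * index the ring is treated as full and the packet is rejected,
	 * which keeps "producer == completion" reserved for meaning the
	 * queue is empty, as noted above.
	 */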
3359 
3360 	if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3361 	{
3362 		skb_pull(skb, 3);
3363 		spin_unlock_irqrestore(&bp->lock, flags);
3364 		return NETDEV_TX_BUSY;	/* requeue packet for later */
3365 	}
3366 
3367 	/*
3368 	 * Save info for this packet for xmt done indication routine
3369 	 *
3370 	 * Normally, we'd save the producer index in the p_xmt_drv_descr
3371 	 * structure so that we'd have it handy when we complete this
3372 	 * packet later (in dfx_xmt_done).  However, since the current
3373 	 * transmit architecture guarantees a single fragment for the
3374 	 * entire packet, we can simply bump the completion index by
3375 	 * one (1) for each completed packet.
3376 	 *
3377 	 * Note: If this assumption changes and we're presented with
3378 	 *	 an inconsistent number of transmit fragments for packet
3379 	 *	 data, we'll need to modify this code to save the current
3380 	 *	 transmit producer index.
3381 	 */
3382 
3383 	p_xmt_drv_descr->p_skb = skb;
3384 
3385 	/* Update Type 2 register */
3386 
3387 	bp->rcv_xmt_reg.index.xmt_prod = prod;
3388 	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3389 	spin_unlock_irqrestore(&bp->lock, flags);
3390 	netif_wake_queue(dev);
3391 	return NETDEV_TX_OK;	/* packet queued to adapter */
3392 	}
3393 
3394 
3395 /*
3396  * ================
3397  * = dfx_xmt_done =
3398  * ================
3399  *
3400  * Overview:
3401  *   Processes all frames that have been transmitted.
3402  *
3403  * Returns:
3404  *   Number of transmit buffers freed
3405  *
3406  * Arguments:
3407  *   bp - pointer to board information
3408  *
3409  * Functional Description:
3410  *   For all consumed transmit descriptors that have not
3411  *   yet been completed, we'll free the skb we were holding
3412  *   onto using dev_kfree_skb and bump the appropriate
3413  *   counters.
3414  *
3415  * Return Codes:
3416  *   Number of completed frames whose sk_buffs were freed
3417  *
3418  * Assumptions:
3419  *   The Type 2 register is not updated in this routine.  It is
3420  *   assumed that it will be updated in the ISR when dfx_xmt_done
3421  *   returns.
3422  *
3423  * Side Effects:
3424  *   None
3425  */
3426 
3427 static int dfx_xmt_done(DFX_board_t *bp)
3428 	{
3429 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3430 	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
3431 	u8			comp;			/* local transmit completion index */
3432 	int 			freed = 0;		/* buffers freed */
3433 
3434 	/* Service all consumed transmit frames */
3435 
3436 	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3437 	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3438 		{
3439 		/* Get pointer to the transmit driver descriptor block information */
3440 
3441 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3442 
3443 		/* Increment transmit counters */
3444 
3445 		bp->xmt_total_frames++;
3446 		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3447 
3448 		/* Return skb to operating system */
3449 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3450 		dma_unmap_single(bp->bus_dev,
3451 				 bp->descr_block_virt->xmt_data[comp].long_1,
3452 				 p_xmt_drv_descr->p_skb->len,
3453 				 DMA_TO_DEVICE);
3454 		dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3455 
3456 		/*
3457 		 * Move to start of next packet by updating completion index
3458 		 *
3459 		 * Here we assume that a transmit packet request is always
3460 		 * serviced by posting one fragment.  We can therefore
3461 		 * simplify the completion code by incrementing the
3462 		 * completion index by one.  This code will need to be
3463 		 * modified if this assumption changes.  See comments
3464 		 * in dfx_xmt_queue_pkt for more details.
3465 		 */
3466 
3467 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3468 		freed++;
3469 		}
3470 	return freed;
3471 	}
3472 
3473 
3474 /*
3475  * =================
3476  * = dfx_rcv_flush =
3477  * =================
3478  *
3479  * Overview:
3480  *   Remove all skb's in the receive ring.
3481  *
3482  * Returns:
3483  *   None
3484  *
3485  * Arguments:
3486  *   bp - pointer to board information
3487  *
3488  * Functional Description:
3489  *   Frees all the dynamically allocated skbs that are
3490  *   currently attached to the device receive ring. This
3491  *   function is typically only used when the device is
3492  *   initialized or reinitialized.
3493  *
3494  * Return Codes:
3495  *   None
3496  *
3497  * Side Effects:
3498  *   None
3499  */
3500 #ifdef DYNAMIC_BUFFERS
3501 static void dfx_rcv_flush( DFX_board_t *bp )
3502 	{
3503 	int i, j;
3504 
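	/*
	 * The nested loops below visit each receive descriptor slot i + j
	 * exactly once, with j advancing in steps of rcv_bufs_to_post.  Any
	 * sk_buff still attached to a slot is unmapped, freed, and its ring
	 * pointer cleared.
	 */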
3505 	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3506 		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3507 		{
3508 			struct sk_buff *skb;
3509 			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3510 			if (skb) {
3511 				dma_unmap_single(bp->bus_dev,
3512 						 bp->descr_block_virt->rcv_data[i+j].long_1,
3513 						 PI_RCV_DATA_K_SIZE_MAX,
3514 						 DMA_FROM_DEVICE);
3515 				dev_kfree_skb(skb);
3516 			}
3517 			bp->p_rcv_buff_va[i+j] = NULL;
3518 		}
3519 
3520 	}
3521 #endif /* DYNAMIC_BUFFERS */
3522 
3523 /*
3524  * =================
3525  * = dfx_xmt_flush =
3526  * =================
3527  *
3528  * Overview:
3529  *   Processes all frames whether they've been transmitted
3530  *   or not.
3531  *
3532  * Returns:
3533  *   None
3534  *
3535  * Arguments:
3536  *   bp - pointer to board information
3537  *
3538  * Functional Description:
3539  *   For all produced transmit descriptors that have not
3540  *   yet been completed, we'll free the skb we were holding
3541  *   onto using dev_kfree_skb and bump the appropriate
3542  *   counters.  Of course, it's possible that some of
3543  *   these transmit requests actually did go out, but we
3544  *   won't make that distinction here.  Finally, we'll
3545  *   update the consumer index to match the producer.
3546  *
3547  * Return Codes:
3548  *   None
3549  *
3550  * Assumptions:
3551  *   This routine does NOT update the Type 2 register.  It
3552  *   is assumed that this routine is being called during a
3553  *   transmit flush interrupt, or a shutdown or close routine.
3554  *
3555  * Side Effects:
3556  *   None
3557  */
3558 
3559 static void dfx_xmt_flush( DFX_board_t *bp )
3560 	{
3561 	u32			prod_cons;		/* rcv/xmt consumer block longword */
3562 	XMT_DRIVER_DESCR	*p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
3563 	u8			comp;			/* local transmit completion index */
3564 
3565 	/* Flush all outstanding transmit frames */
3566 
3567 	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3568 		{
3569 		/* Get pointer to the transmit driver descriptor block information */
3570 
3571 		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3572 
3573 		/* Return skb to operating system */
3574 		comp = bp->rcv_xmt_reg.index.xmt_comp;
3575 		dma_unmap_single(bp->bus_dev,
3576 				 bp->descr_block_virt->xmt_data[comp].long_1,
3577 				 p_xmt_drv_descr->p_skb->len,
3578 				 DMA_TO_DEVICE);
3579 		dev_kfree_skb(p_xmt_drv_descr->p_skb);
3580 
3581 		/* Increment transmit error counter */
3582 
3583 		bp->xmt_discards++;
3584 
3585 		/*
3586 		 * Move to start of next packet by updating completion index
3587 		 *
3588 		 * Here we assume that a transmit packet request is always
3589 		 * serviced by posting one fragment.  We can therefore
3590 		 * simplify the completion code by incrementing the
3591 		 * completion index by one.  This code will need to be
3592 		 * modified if this assumption changes.  See comments
3593 		 * in dfx_xmt_queue_pkt for more details.
3594 		 */
3595 
3596 		bp->rcv_xmt_reg.index.xmt_comp += 1;
3597 		}
3598 
3599 	/* Update the transmit consumer index in the consumer block */
3600 
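	/*
	 * Read-modify-write of the shared consumer longword: clear the
	 * transmit consumer field (PI_CONS_M_XMT_INDEX) and OR in the
	 * producer index shifted to PI_CONS_V_XMT_INDEX, so that the
	 * transmit queue reads back as empty (consumer == producer).
	 */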
3601 	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3602 	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3603 	bp->cons_block_virt->xmt_rcv_data = prod_cons;
3604 	}
3605 
3606 /*
3607  * ==================
3608  * = dfx_unregister =
3609  * ==================
3610  *
3611  * Overview:
3612  *   Shuts down an FDDI controller
3613  *
3614  * Returns:
3615  *   None
3616  *
3617  * Arguments:
3618  *   bdev - pointer to device information
3619  *
3620  * Functional Description:
3621  *   Undoes dfx_register: releases driver DMA memory and bus resources, then frees the net device.
3622  * Return Codes:
3623  *   None
3624  *
3625  * Assumptions:
3626  *   It compiles so it should work :-( (PCI cards do :-)
3627  *
3628  * Side Effects:
3629  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
3630  *   freed.
3631  */
3632 static void dfx_unregister(struct device *bdev)
3633 {
3634 	struct net_device *dev = dev_get_drvdata(bdev);
3635 	DFX_board_t *bp = netdev_priv(dev);
3636 	int dfx_bus_pci = dev_is_pci(bdev);
3637 	int dfx_bus_tc = DFX_BUS_TC(bdev);
3638 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3639 	resource_size_t bar_start = 0;		/* start of port or memory resource */
3640 	resource_size_t bar_len = 0;		/* resource length */
3641 	int		alloc_size;		/* total buffer size used */
3642 
3643 	unregister_netdev(dev);
3644 
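	/*
	 * Recompute the size of the single DMA-consistent block obtained at
	 * initialization time so the whole block can be freed below; this
	 * expression is assumed to mirror the one used at the allocation
	 * site.
	 */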
3645 	alloc_size = sizeof(PI_DESCR_BLOCK) +
3646 		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3647 #ifndef DYNAMIC_BUFFERS
3648 		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3649 #endif
3650 		     sizeof(PI_CONSUMER_BLOCK) +
3651 		     (PI_ALIGN_K_DESC_BLK - 1);
3652 	if (bp->kmalloced)
3653 		dma_free_coherent(bdev, alloc_size,
3654 				  bp->kmalloced, bp->kmalloced_dma);
3655 
3656 	dfx_bus_uninit(dev);
3657 
3658 	dfx_get_bars(bdev, &bar_start, &bar_len);
3659 	if (dfx_use_mmio) {
3660 		iounmap(bp->base.mem);
3661 		release_mem_region(bar_start, bar_len);
3662 	} else
3663 		release_region(bar_start, bar_len);
3664 
3665 	if (dfx_bus_pci)
3666 		pci_disable_device(to_pci_dev(bdev));
3667 
3668 	free_netdev(dev);
3669 }
3670 
3671 
3672 static int __maybe_unused dfx_dev_register(struct device *);
3673 static int __maybe_unused dfx_dev_unregister(struct device *);
3674 
3675 #ifdef CONFIG_PCI
3676 static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3677 static void dfx_pci_unregister(struct pci_dev *);
3678 
3679 static const struct pci_device_id dfx_pci_table[] = {
3680 	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3681 	{ }
3682 };
3683 MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3684 
3685 static struct pci_driver dfx_pci_driver = {
3686 	.name		= "defxx",
3687 	.id_table	= dfx_pci_table,
3688 	.probe		= dfx_pci_register,
3689 	.remove		= dfx_pci_unregister,
3690 };
3691 
3692 static int dfx_pci_register(struct pci_dev *pdev,
3693 			    const struct pci_device_id *ent)
3694 {
3695 	return dfx_register(&pdev->dev);
3696 }
3697 
3698 static void dfx_pci_unregister(struct pci_dev *pdev)
3699 {
3700 	dfx_unregister(&pdev->dev);
3701 }
3702 #endif /* CONFIG_PCI */
3703 
3704 #ifdef CONFIG_EISA
3705 static struct eisa_device_id dfx_eisa_table[] = {
3706 	{ "DEC3001", DEFEA_PROD_ID_1 },
3707 	{ "DEC3002", DEFEA_PROD_ID_2 },
3708 	{ "DEC3003", DEFEA_PROD_ID_3 },
3709 	{ "DEC3004", DEFEA_PROD_ID_4 },
3710 	{ }
3711 };
3712 MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3713 
3714 static struct eisa_driver dfx_eisa_driver = {
3715 	.id_table	= dfx_eisa_table,
3716 	.driver		= {
3717 		.name	= "defxx",
3718 		.bus	= &eisa_bus_type,
3719 		.probe	= dfx_dev_register,
3720 		.remove	= dfx_dev_unregister,
3721 	},
3722 };
3723 #endif /* CONFIG_EISA */
3724 
3725 #ifdef CONFIG_TC
3726 static struct tc_device_id const dfx_tc_table[] = {
3727 	{ "DEC     ", "PMAF-FA " },
3728 	{ "DEC     ", "PMAF-FD " },
3729 	{ "DEC     ", "PMAF-FS " },
3730 	{ "DEC     ", "PMAF-FU " },
3731 	{ }
3732 };
3733 MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3734 
3735 static struct tc_driver dfx_tc_driver = {
3736 	.id_table	= dfx_tc_table,
3737 	.driver		= {
3738 		.name	= "defxx",
3739 		.bus	= &tc_bus_type,
3740 		.probe	= dfx_dev_register,
3741 		.remove	= dfx_dev_unregister,
3742 	},
3743 };
3744 #endif /* CONFIG_TC */
3745 
3746 static int __maybe_unused dfx_dev_register(struct device *dev)
3747 {
3748 	int status;
3749 
3750 	status = dfx_register(dev);
3751 	if (!status)
3752 		get_device(dev);
3753 	return status;
3754 }
3755 
3756 static int __maybe_unused dfx_dev_unregister(struct device *dev)
3757 {
3758 	put_device(dev);
3759 	dfx_unregister(dev);
3760 	return 0;
3761 }
3762 
3763 
3764 static int dfx_init(void)
3765 {
3766 	int status;
3767 
3768 	status = pci_register_driver(&dfx_pci_driver);
3769 	if (!status)
3770 		status = eisa_driver_register(&dfx_eisa_driver);
3771 	if (!status)
3772 		status = tc_register_driver(&dfx_tc_driver);
3773 	return status;
3774 }
3775 
3776 static void dfx_cleanup(void)
3777 {
3778 	tc_unregister_driver(&dfx_tc_driver);
3779 	eisa_driver_unregister(&dfx_eisa_driver);
3780 	pci_unregister_driver(&dfx_pci_driver);
3781 }
3782 
3783 module_init(dfx_init);
3784 module_exit(dfx_cleanup);
3785 MODULE_AUTHOR("Lawrence V. Stefani");
3786 MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3787 		   DRV_VERSION " " DRV_RELDATE);
3788 MODULE_LICENSE("GPL");
3789