1 /*
2 * File Name:
3 * defxx.c
4 *
5 * Copyright Information:
6 * Copyright Digital Equipment Corporation 1996.
7 *
8 * This software may be used and distributed according to the terms of
9 * the GNU General Public License, incorporated herein by reference.
10 *
11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI TURBOchannel, EISA and PCI controller families. Supported
14 * adapters include:
15 *
16 * DEC FDDIcontroller/TURBOchannel (DEFTA)
17 * DEC FDDIcontroller/EISA (DEFEA)
18 * DEC FDDIcontroller/PCI (DEFPA)
19 *
20 * The original author:
21 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
22 *
23 * Maintainers:
24 * macro Maciej W. Rozycki <macro@orcam.me.uk>
25 *
26 * Credits:
27 * I'd like to thank Patricia Cross for helping me get started with
28 * Linux, David Davies for a lot of help upgrading and configuring
29 * my development system and for answering many OS and driver
30 * development questions, and Alan Cox for recommendations and
31 * integration help on getting FDDI support into Linux. LVS
32 *
33 * Driver Architecture:
34 * The driver architecture is largely based on previous driver work
35 * for other operating systems. The upper edge interface and
36 * functions were largely taken from existing Linux device drivers
37 * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38 * driver.
39 *
40 * Adapter Probe -
41 * The driver scans for supported EISA adapters by reading the
42 * SLOT ID register for each EISA slot and making a match
43 * against the expected value.
44 *
45 * Bus-Specific Initialization -
46 * This driver currently supports both EISA and PCI controller
47 * families. While the custom DMA chip and FDDI logic are similar
48 * or identical, the bus logic is very different. After
49 * initialization, the only bus-specific difference is in how the
50 * driver enables and disables interrupts. Other than that, the
51 * run-time critical code behaves the same on both families.
52 * It's important to note that both adapter families are configured
53 * to I/O map, rather than memory map, the adapter registers.
54 *
55 * Driver Open/Close -
56 * In the driver open routine, the driver ISR (interrupt service
57 * routine) is registered and the adapter is brought to an
58 * operational state. In the driver close routine, the opposite
59 * occurs; the driver ISR is deregistered and the adapter is
60 * brought to a safe, but closed state. Users may use consecutive
61 * commands to bring the adapter up and down as in the following
62 * example:
63 * ifconfig fddi0 up
64 * ifconfig fddi0 down
65 * ifconfig fddi0 up
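 *
 *		On systems that use iproute2 rather than the legacy ifconfig
 *		tool, the same cycle can be driven with, for example:
 *			ip link set fddi0 up
 *			ip link set fddi0 down
 *			ip link set fddi0 up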
66 *
67 * Driver Shutdown -
68 * Apparently, there is no shutdown or halt routine support under
69 * Linux. Such a routine would be called during "reboot" or
70 * "shutdown" to allow the driver to place the adapter in a safe
71 * state before a warm reboot occurs. To be really safe, the user
72 * should close the adapter before shutdown (e.g. ifconfig fddi0 down)
73 * to ensure that the adapter DMA engine is taken off-line. However,
74 * the current driver code anticipates this problem and always issues
75 * a soft reset of the adapter at the beginning of driver initialization.
76 * A future driver enhancement in this area may occur in 2.1.X where
77 * Alan indicated that a shutdown handler may be implemented.
78 *
79 * Interrupt Service Routine -
80 * The driver supports shared interrupts, so the ISR is registered for
81 * each board with the appropriate flag and the pointer to that board's
82 * device structure. This provides the context during interrupt
83 * processing to support shared interrupts and multiple boards.
84 *
85 * Interrupt enabling/disabling can occur at many levels. At the host
86 * end, you can disable system interrupts, or disable interrupts at the
87 * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
88 * have a bus-logic chip interrupt enable/disable as well as a DMA
89 * controller interrupt enable/disable.
90 *
91 * The driver currently enables and disables adapter interrupts at the
92 * bus-logic chip and assumes that Linux will take care of clearing or
93 * acknowledging any host-based interrupt chips.
94 *
95 * Control Functions -
96 * Control functions are those used to support functions such as adding
97 * or deleting multicast addresses, enabling or disabling packet
98 * reception filters, or other custom/proprietary commands. Presently,
99 * the driver supports the "get statistics", "set multicast list", and
100 * "set mac address" functions defined by Linux. A list of possible
101 * enhancements includes:
102 *
103 * - Custom ioctl interface for executing port interface commands
104 * - Custom ioctl interface for adding unicast addresses to
105 * adapter CAM (to support bridge functions).
106 * - Custom ioctl interface for supporting firmware upgrades.
107 *
108 * Hardware (port interface) Support Routines -
109 * The driver function names that start with "dfx_hw_" represent
110 * low-level port interface routines that are called frequently. They
111 * include issuing a DMA or port control command to the adapter,
112 * resetting the adapter, or reading the adapter state. Since the
113 * driver initialization and run-time code must make calls into the
114 * port interface, these routines were written to be as generic and
115 * usable as possible.
116 *
117 * Receive Path -
118 * The adapter DMA engine supports a 256 entry receive descriptor block
119 * of which up to 255 entries can be used at any given time. The
120 * architecture is a standard producer, consumer, completion model in
121 * which the driver "produces" receive buffers to the adapter, the
122 * adapter "consumes" the receive buffers by DMAing incoming packet data,
123 * and the driver "completes" the receive buffers by servicing the
124 * incoming packet, then "produces" a new buffer and starts the cycle
125 * again. Receive buffers can be fragmented in up to 16 fragments
126 * (descriptor entries). For simplicity, this driver posts
127 * single-fragment receive buffers of 4608 bytes, then allocates a
128 * sk_buff, copies the data, then reposts the buffer. To reduce CPU
129 * utilization, a better approach would be to pass up the receive
130 * buffer (no extra copy) then allocate and post a replacement buffer.
131 * This is a performance enhancement that should be looked into at
132 * some point.
133 *
134 * Transmit Path -
135 * Like the receive path, the adapter DMA engine supports a 256 entry
136 * transmit descriptor block of which up to 255 entries can be used at
137 * any given time. Transmit buffers can be fragmented in up to 255
138 * fragments (descriptor entries). This driver always posts one
139 * fragment per transmit packet request.
140 *
141 * The fragment contains the entire packet from FC to end of data.
142 * Before posting the buffer to the adapter, the driver sets a three-byte
143 * packet request header (PRH) which is required by the Motorola MAC chip
144 * used on the adapters. The PRH tells the MAC the type of token to
145 * receive/send, whether or not to generate and append the CRC, whether
146 * synchronous or asynchronous framing is used, etc. Since the PRH
147 * definition is not necessarily consistent across all FDDI chipsets,
148 * the driver, rather than the common FDDI packet handler routines,
149 * sets these bytes.
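 *
 *		In rough outline (a sketch only; the actual values are the
 *		DFX_PRH*_BYTE constants defined in defxx.h), the transmit
 *		path pushes the three PRH bytes in front of the frame before
 *		handing it to the DMA engine:
 *
 *			skb_push(skb, 3);
 *			skb->data[0] = DFX_PRH0_BYTE;
 *			skb->data[1] = DFX_PRH1_BYTE;
 *			skb->data[2] = DFX_PRH2_BYTE;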
150 *
151 * To reduce the amount of descriptor fetches needed per transmit request,
152 * the driver takes advantage of the fact that there are at least three
153 * bytes available before the skb->data field on the outgoing transmit
154 * request. This is guaranteed by having fddi_setup() in net_init.c set
155 * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
156 * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
157 * bytes which we'll use to store the PRH.
158 *
159 * There's a subtle advantage to adding these pad bytes to the
160 * hard_header_len: it ensures that the data portion of the packet for
161 * an 802.2 SNAP frame is longword aligned. Other FDDI driver
162 * implementations may not need the extra padding and can start copying
163 * or DMAing directly from the FC byte which starts at skb->data. Should
164 * another driver implementation need ADDITIONAL padding, the net_init.c
165 * module should be updated and dev->hard_header_len should be increased.
166 * NOTE: To maintain the alignment on the data portion of the packet,
167 * dev->hard_header_len should always be evenly divisible by 4 and at
168 * least 24 bytes in size.
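 *
 *		For reference, the 24 bytes break down as follows for the
 *		largest (802.2 SNAP) case:
 *
 *			3 (PRH pad) + 1 (FC) + 6 (DA) + 6 (SA)
 *				+ 3 (LLC) + 5 (SNAP) = 24 bytes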
169 *
170 * Modification History:
171 * Date Name Description
172 * 16-Aug-96 LVS Created.
173 * 20-Aug-96 LVS Updated dfx_probe so that version information
174 * string is only displayed if 1 or more cards are
175 * found. Changed dfx_rcv_queue_process to copy
176 * 3 NULL bytes before FC to ensure that data is
177 * longword aligned in receive buffer.
178 * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
179 * LLC group promiscuous mode if multicast list
180 * is too large. LLC individual/group promiscuous
181 * mode is now disabled if IFF_PROMISC flag not set.
182 * dfx_xmt_queue_pkt no longer checks for NULL skb
183 * on Alan Cox recommendation. Added node address
184 * override support.
185 * 12-Sep-96 LVS Reset current address to factory address during
186 * device open. Updated transmit path to post a
187 * single fragment which includes PRH->end of data.
188 * Mar 2000 AC Did various cleanups for 2.3.x
189 * Jun 2000 jgarzik PCI and resource alloc cleanups
190 * Jul 2000 tjeerd Much cleanup and some bug fixes
191 * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
192 * Feb 2001 Skb allocation fixes
193 * Feb 2001 davej PCI enable cleanups.
194 * 04 Aug 2003 macro Converted to the DMA API.
195 * 14 Aug 2004 macro Fix device names reported.
196 * 14 Jun 2005 macro Use irqreturn_t.
197 * 23 Oct 2006 macro Big-endian host support.
198 * 14 Dec 2006 macro TURBOchannel support.
199 * 01 Jul 2014 macro Fixes for DMA on 64-bit hosts.
200 * 10 Mar 2021 macro Dynamic MMIO vs port I/O.
201 */
202
203 /* Include files */
204 #include <linux/bitops.h>
205 #include <linux/compiler.h>
206 #include <linux/delay.h>
207 #include <linux/dma-mapping.h>
208 #include <linux/eisa.h>
209 #include <linux/errno.h>
210 #include <linux/fddidevice.h>
211 #include <linux/interrupt.h>
212 #include <linux/ioport.h>
213 #include <linux/kernel.h>
214 #include <linux/module.h>
215 #include <linux/netdevice.h>
216 #include <linux/pci.h>
217 #include <linux/skbuff.h>
218 #include <linux/slab.h>
219 #include <linux/string.h>
220 #include <linux/tc.h>
221
222 #include <asm/byteorder.h>
223 #include <asm/io.h>
224
225 #include "defxx.h"
226
227 /* Version information string should be updated prior to each new release! */
228 #define DRV_NAME "defxx"
229 #define DRV_VERSION "v1.12"
230 #define DRV_RELDATE "2021/03/10"
231
232 static const char version[] =
233 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
234 " Lawrence V. Stefani and others\n";
235
236 #define DYNAMIC_BUFFERS 1
237
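/*
 * Received frames shorter than this are copied into a freshly allocated
 * sk_buff and the posted receive buffer is reused; longer frames are
 * passed up the stack in the posted buffer itself (when DYNAMIC_BUFFERS
 * is in effect) and a replacement buffer is allocated.
 */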
238 #define SKBUFF_RX_COPYBREAK 200
239 /*
240 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
241 * alignment for compatibility with old EISA boards.
242 */
243 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
244
245 #ifdef CONFIG_EISA
246 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
247 #else
248 #define DFX_BUS_EISA(dev) 0
249 #endif
250
251 #ifdef CONFIG_TC
252 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
253 #else
254 #define DFX_BUS_TC(dev) 0
255 #endif
256
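/*
 * dfx_use_mmio selects between memory-mapped and port I/O register access
 * at run time.  On EISA and PCI the choice is recorded per board in
 * bp->mmio; on TURBOchannel-only configurations MMIO is always used, so
 * the macro collapses to a constant.
 */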
257 #if defined(CONFIG_EISA) || defined(CONFIG_PCI)
258 #define dfx_use_mmio bp->mmio
259 #else
260 #define dfx_use_mmio true
261 #endif
262
263 /* Define module-wide (static) routines */
264
265 static void dfx_bus_init(struct net_device *dev);
266 static void dfx_bus_uninit(struct net_device *dev);
267 static void dfx_bus_config_check(DFX_board_t *bp);
268
269 static int dfx_driver_init(struct net_device *dev,
270 const char *print_name,
271 resource_size_t bar_start);
272 static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
273
274 static int dfx_open(struct net_device *dev);
275 static int dfx_close(struct net_device *dev);
276
277 static void dfx_int_pr_halt_id(DFX_board_t *bp);
278 static void dfx_int_type_0_process(DFX_board_t *bp);
279 static void dfx_int_common(struct net_device *dev);
280 static irqreturn_t dfx_interrupt(int irq, void *dev_id);
281
282 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
283 static void dfx_ctl_set_multicast_list(struct net_device *dev);
284 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
285 static int dfx_ctl_update_cam(DFX_board_t *bp);
286 static int dfx_ctl_update_filters(DFX_board_t *bp);
287
288 static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
289 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
290 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
291 static int dfx_hw_adap_state_rd(DFX_board_t *bp);
292 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
293
294 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
295 static void dfx_rcv_queue_process(DFX_board_t *bp);
296 #ifdef DYNAMIC_BUFFERS
297 static void dfx_rcv_flush(DFX_board_t *bp);
298 #else
299 static inline void dfx_rcv_flush(DFX_board_t *bp) {}
300 #endif
301
302 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
303 struct net_device *dev);
304 static int dfx_xmt_done(DFX_board_t *bp);
305 static void dfx_xmt_flush(DFX_board_t *bp);
306
307 /* Define module-wide (static) variables */
308
309 static struct pci_driver dfx_pci_driver;
310 static struct eisa_driver dfx_eisa_driver;
311 static struct tc_driver dfx_tc_driver;
312
313
314 /*
315 * =======================
316 * = dfx_port_write_long =
317 * = dfx_port_read_long =
318 * =======================
319 *
320 * Overview:
321 * Routines for reading and writing values from/to adapter
322 *
323 * Returns:
324 * None
325 *
326 * Arguments:
327 * bp - pointer to board information
328 * offset - register offset from base I/O address
329 * data - for dfx_port_write_long, this is a value to write;
330 * for dfx_port_read_long, this is a pointer to store
331 * the read value
332 *
333 * Functional Description:
334 * These routines perform the correct operation to read or write
335 * the adapter register.
336 *
337 * EISA port block base addresses are based on the slot number in which the
338 * controller is installed. For example, if the EISA controller is installed
339 * in slot 4, the port block base address is 0x4000. If the controller is
340 * installed in slot 2, the port block base address is 0x2000, and so on.
341 * This port block can be used to access PDQ, ESIC, and DEFEA on-board
342 * registers using the register offsets defined in DEFXX.H.
343 *
344 * PCI port block base addresses are assigned by the PCI BIOS or system
345 * firmware. There is one 128 byte port block which can be accessed. It
346 * allows for I/O mapping of both PDQ and PFI registers using the register
347 * offsets defined in DEFXX.H.
348 *
349 * Return Codes:
350 * None
351 *
352 * Assumptions:
353 * bp->base is a valid base I/O address for this adapter.
354 * offset is a valid register offset for this adapter.
355 *
356 * Side Effects:
357 * Rather than produce macros for these functions, these routines
358 * are defined using "inline" to ensure that the compiler will
359 * generate inline code and not waste a procedure call and return.
360 * This provides all the benefits of macros, but with the
361 * advantage of strict data type checking.
362 */
363
364 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
365 {
366 writel(data, bp->base.mem + offset);
367 mb();
368 }
369
370 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
371 {
372 outl(data, bp->base.port + offset);
373 }
374
375 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
376 {
377 struct device __maybe_unused *bdev = bp->bus_dev;
378
379 if (dfx_use_mmio)
380 dfx_writel(bp, offset, data);
381 else
382 dfx_outl(bp, offset, data);
383 }
384
385
386 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
387 {
388 mb();
389 *data = readl(bp->base.mem + offset);
390 }
391
392 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
393 {
394 *data = inl(bp->base.port + offset);
395 }
396
397 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
398 {
399 struct device __maybe_unused *bdev = bp->bus_dev;
400
401 if (dfx_use_mmio)
402 dfx_readl(bp, offset, data);
403 else
404 dfx_inl(bp, offset, data);
405 }
406
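/*
 * dfx_writel() issues the MMIO write followed by a full memory barrier,
 * and dfx_readl() issues the barrier before the read, keeping register
 * access ordered with respect to other memory accesses.  A typical caller
 * is the interrupt mask update used throughout the driver:
 *
 *	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
 *			    PI_HOST_INT_K_DISABLE_ALL_INTS);
 */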
407
408 /*
409 * ================
410 * = dfx_get_bars =
411 * ================
412 *
413 * Overview:
414 * Retrieves the address ranges used to access control and status
415 * registers.
416 *
417 * Returns:
418 * None
419 *
420 * Arguments:
421 * bp - pointer to board information
422 * bar_start - pointer to store the start addresses
423 * bar_len - pointer to store the lengths of the areas
424 *
425 * Assumptions:
426 * I am sure there are some.
427 *
428 * Side Effects:
429 * None
430 */
431 static void dfx_get_bars(DFX_board_t *bp,
432 resource_size_t *bar_start, resource_size_t *bar_len)
433 {
434 struct device *bdev = bp->bus_dev;
435 int dfx_bus_pci = dev_is_pci(bdev);
436 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
437 int dfx_bus_tc = DFX_BUS_TC(bdev);
438
439 if (dfx_bus_pci) {
440 int num = dfx_use_mmio ? 0 : 1;
441
442 bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
443 bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
444 bar_start[2] = bar_start[1] = 0;
445 bar_len[2] = bar_len[1] = 0;
446 }
447 if (dfx_bus_eisa) {
448 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
449 resource_size_t bar_lo;
450 resource_size_t bar_hi;
451
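	/*
	 * For MMIO the decode window is reconstructed from the ESIC address
	 * comparator registers: three bytes each for the low and high
	 * bounds, assembled most-significant byte first and left-shifted
	 * by 8, so the bottom 8 address bits end up zero.
	 */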
452 if (dfx_use_mmio) {
453 bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
454 bar_lo <<= 8;
455 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
456 bar_lo <<= 8;
457 bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
458 bar_lo <<= 8;
459 bar_start[0] = bar_lo;
460 bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
461 bar_hi <<= 8;
462 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
463 bar_hi <<= 8;
464 bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
465 bar_hi <<= 8;
466 bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
467 1;
468 } else {
469 bar_start[0] = base_addr;
470 bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
471 }
472 bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
473 bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
474 bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
475 bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
476 }
477 if (dfx_bus_tc) {
478 bar_start[0] = to_tc_dev(bdev)->resource.start +
479 PI_TC_K_CSR_OFFSET;
480 bar_len[0] = PI_TC_K_CSR_LEN;
481 bar_start[2] = bar_start[1] = 0;
482 bar_len[2] = bar_len[1] = 0;
483 }
484 }
485
486 static const struct net_device_ops dfx_netdev_ops = {
487 .ndo_open = dfx_open,
488 .ndo_stop = dfx_close,
489 .ndo_start_xmit = dfx_xmt_queue_pkt,
490 .ndo_get_stats = dfx_ctl_get_stats,
491 .ndo_set_rx_mode = dfx_ctl_set_multicast_list,
492 .ndo_set_mac_address = dfx_ctl_set_mac_address,
493 };
494
495 static void dfx_register_res_err(const char *print_name, bool mmio,
496 unsigned long start, unsigned long len)
497 {
498 pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
499 print_name, mmio ? "MMIO" : "I/O", len, start);
500 }
501
502 /*
503 * ================
504 * = dfx_register =
505 * ================
506 *
507 * Overview:
508 * Initializes a supported FDDI controller
509 *
510 * Returns:
511 * Condition code
512 *
513 * Arguments:
514 * bdev - pointer to device information
515 *
516 * Functional Description:
517 *
518 * Return Codes:
519 * 0 - This device (fddi0, fddi1, etc) configured successfully
520 * -EBUSY - Failed to get resources (other errors such as -ENOMEM or -ENODEV are returned for allocation or initialization failures)
521 *
522 * Assumptions:
523 * It compiles so it should work :-( (PCI cards do :-)
524 *
525 * Side Effects:
526 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
527 * initialized and the board resources are read and stored in
528 * the device structure.
529 */
530 static int dfx_register(struct device *bdev)
531 {
532 static int version_disp;
533 int dfx_bus_pci = dev_is_pci(bdev);
534 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
535 const char *print_name = dev_name(bdev);
536 struct net_device *dev;
537 DFX_board_t *bp; /* board pointer */
538 resource_size_t bar_start[3] = {0}; /* pointers to ports */
539 resource_size_t bar_len[3] = {0}; /* resource length */
540 int alloc_size; /* total buffer size used */
541 struct resource *region;
542 int err = 0;
543
544 if (!version_disp) { /* display version info if adapter is found */
545 version_disp = 1; /* set display flag to TRUE so that */
546 printk(version); /* we only display this string ONCE */
547 }
548
549 dev = alloc_fddidev(sizeof(*bp));
550 if (!dev) {
551 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
552 print_name);
553 return -ENOMEM;
554 }
555
556 /* Enable PCI device. */
557 if (dfx_bus_pci) {
558 err = pci_enable_device(to_pci_dev(bdev));
559 if (err) {
560 pr_err("%s: Cannot enable PCI device, aborting\n",
561 print_name);
562 goto err_out;
563 }
564 }
565
566 SET_NETDEV_DEV(dev, bdev);
567
568 bp = netdev_priv(dev);
569 bp->bus_dev = bdev;
570 dev_set_drvdata(bdev, dev);
571
572 bp->mmio = true;
573
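	/*
	 * Prefer MMIO, but fall back to port I/O if no usable MMIO range is
	 * reported (or an EISA board has memory decoding disabled), or if
	 * the MMIO range cannot be reserved below.
	 */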
574 dfx_get_bars(bp, bar_start, bar_len);
575 if (bar_len[0] == 0 ||
576 (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
577 bp->mmio = false;
578 dfx_get_bars(bp, bar_start, bar_len);
579 }
580
581 if (dfx_use_mmio) {
582 region = request_mem_region(bar_start[0], bar_len[0],
583 bdev->driver->name);
584 if (!region && (dfx_bus_eisa || dfx_bus_pci)) {
585 bp->mmio = false;
586 dfx_get_bars(bp, bar_start, bar_len);
587 }
588 }
589 if (!dfx_use_mmio)
590 region = request_region(bar_start[0], bar_len[0],
591 bdev->driver->name);
592 if (!region) {
593 dfx_register_res_err(print_name, dfx_use_mmio,
594 bar_start[0], bar_len[0]);
595 err = -EBUSY;
596 goto err_out_disable;
597 }
598 if (bar_start[1] != 0) {
599 region = request_region(bar_start[1], bar_len[1],
600 bdev->driver->name);
601 if (!region) {
602 dfx_register_res_err(print_name, 0,
603 bar_start[1], bar_len[1]);
604 err = -EBUSY;
605 goto err_out_csr_region;
606 }
607 }
608 if (bar_start[2] != 0) {
609 region = request_region(bar_start[2], bar_len[2],
610 bdev->driver->name);
611 if (!region) {
612 dfx_register_res_err(print_name, 0,
613 bar_start[2], bar_len[2]);
614 err = -EBUSY;
615 goto err_out_bh_region;
616 }
617 }
618
619 /* Set up I/O base address. */
620 if (dfx_use_mmio) {
621 bp->base.mem = ioremap(bar_start[0], bar_len[0]);
622 if (!bp->base.mem) {
623 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
624 err = -ENOMEM;
625 goto err_out_esic_region;
626 }
627 } else {
628 bp->base.port = bar_start[0];
629 dev->base_addr = bar_start[0];
630 }
631
632 /* Initialize new device structure */
633 dev->netdev_ops = &dfx_netdev_ops;
634
635 if (dfx_bus_pci)
636 pci_set_master(to_pci_dev(bdev));
637
638 if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
639 err = -ENODEV;
640 goto err_out_unmap;
641 }
642
643 err = register_netdev(dev);
644 if (err)
645 goto err_out_kfree;
646
647 printk("%s: registered as %s\n", print_name, dev->name);
648 return 0;
649
650 err_out_kfree:
651 alloc_size = sizeof(PI_DESCR_BLOCK) +
652 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
653 #ifndef DYNAMIC_BUFFERS
654 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
655 #endif
656 sizeof(PI_CONSUMER_BLOCK) +
657 (PI_ALIGN_K_DESC_BLK - 1);
658 if (bp->kmalloced)
659 dma_free_coherent(bdev, alloc_size,
660 bp->kmalloced, bp->kmalloced_dma);
661
662 err_out_unmap:
663 if (dfx_use_mmio)
664 iounmap(bp->base.mem);
665
666 err_out_esic_region:
667 if (bar_start[2] != 0)
668 release_region(bar_start[2], bar_len[2]);
669
670 err_out_bh_region:
671 if (bar_start[1] != 0)
672 release_region(bar_start[1], bar_len[1]);
673
674 err_out_csr_region:
675 if (dfx_use_mmio)
676 release_mem_region(bar_start[0], bar_len[0]);
677 else
678 release_region(bar_start[0], bar_len[0]);
679
680 err_out_disable:
681 if (dfx_bus_pci)
682 pci_disable_device(to_pci_dev(bdev));
683
684 err_out:
685 free_netdev(dev);
686 return err;
687 }
688
689
690 /*
691 * ================
692 * = dfx_bus_init =
693 * ================
694 *
695 * Overview:
696 * Initializes the bus-specific controller logic.
697 *
698 * Returns:
699 * None
700 *
701 * Arguments:
702 * dev - pointer to device information
703 *
704 * Functional Description:
705 * Determine and save adapter IRQ in device table,
706 * then perform bus-specific logic initialization.
707 *
708 * Return Codes:
709 * None
710 *
711 * Assumptions:
712 * bp->base has already been set with the proper
713 * base I/O address for this device.
714 *
715 * Side Effects:
716 * Interrupts are enabled at the adapter bus-specific logic.
717 * Note: Interrupts at the DMA engine (PDQ chip) are not
718 * enabled yet.
719 */
720
721 static void dfx_bus_init(struct net_device *dev)
722 {
723 DFX_board_t *bp = netdev_priv(dev);
724 struct device *bdev = bp->bus_dev;
725 int dfx_bus_pci = dev_is_pci(bdev);
726 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
727 int dfx_bus_tc = DFX_BUS_TC(bdev);
728 u8 val;
729
730 DBG_printk("In dfx_bus_init...\n");
731
732 /* Initialize a pointer back to the net_device struct */
733 bp->dev = dev;
734
735 /* Initialize adapter based on bus type */
736
737 if (dfx_bus_tc)
738 dev->irq = to_tc_dev(bdev)->interrupt;
739 if (dfx_bus_eisa) {
740 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
741
742 /* Disable the board before fiddling with the decoders. */
743 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
744
745 /* Get the interrupt level from the ESIC chip. */
746 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
747 val &= PI_CONFIG_STAT_0_M_IRQ;
748 val >>= PI_CONFIG_STAT_0_V_IRQ;
749
750 switch (val) {
751 case PI_CONFIG_STAT_0_IRQ_K_9:
752 dev->irq = 9;
753 break;
754
755 case PI_CONFIG_STAT_0_IRQ_K_10:
756 dev->irq = 10;
757 break;
758
759 case PI_CONFIG_STAT_0_IRQ_K_11:
760 dev->irq = 11;
761 break;
762
763 case PI_CONFIG_STAT_0_IRQ_K_15:
764 dev->irq = 15;
765 break;
766 }
767
768 /*
769 * Enable memory decoding (MEMCS1) and/or port decoding
770 * (IOCS1/IOCS0) as appropriate in Function Control
771 * Register. MEMCS1 or IOCS0 is used for PDQ registers,
772 * taking 16 32-bit words, while IOCS1 is used for the
773 * Burst Holdoff register, taking a single 32-bit word
774 * only. We use the slot-specific I/O range as per the
775 * ESIC spec, that is set bits 15:12 in the mask registers
776 * to mask them out.
777 */
778
779 /* Set the decode range of the board. */
780 val = 0;
781 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
782 val = PI_DEFEA_K_CSR_IO;
783 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
784
785 val = PI_IO_CMP_M_SLOT;
786 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
787 val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3;
788 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
789
790 val = 0;
791 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
792 val = PI_DEFEA_K_BURST_HOLDOFF;
793 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
794
795 val = PI_IO_CMP_M_SLOT;
796 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
797 val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3;
798 outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
799
800 /* Enable the decoders. */
801 val = PI_FUNCTION_CNTRL_M_IOCS1;
802 if (dfx_use_mmio)
803 val |= PI_FUNCTION_CNTRL_M_MEMCS1;
804 else
805 val |= PI_FUNCTION_CNTRL_M_IOCS0;
806 outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
807
808 /*
809 * Enable access to the rest of the module
810 * (including PDQ and packet memory).
811 */
812 val = PI_SLOT_CNTRL_M_ENB;
813 outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
814
815 /*
816 * Map PDQ registers into memory or port space. This is
817 * done with a bit in the Burst Holdoff register.
818 */
819 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
820 if (dfx_use_mmio)
821 val |= PI_BURST_HOLDOFF_M_MEM_MAP;
822 else
823 val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
824 outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
825
826 /* Enable interrupts at EISA bus interface chip (ESIC) */
827 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
828 val |= PI_CONFIG_STAT_0_M_INT_ENB;
829 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
830 }
831 if (dfx_bus_pci) {
832 struct pci_dev *pdev = to_pci_dev(bdev);
833
834 /* Get the interrupt level from the PCI Configuration Table */
835
836 dev->irq = pdev->irq;
837
838 /* Check Latency Timer and set if less than minimal */
839
840 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
841 if (val < PFI_K_LAT_TIMER_MIN) {
842 val = PFI_K_LAT_TIMER_DEF;
843 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
844 }
845
846 /* Enable interrupts at PCI bus interface chip (PFI) */
847 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
848 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
849 }
850 }
851
852 /*
853 * ==================
854 * = dfx_bus_uninit =
855 * ==================
856 *
857 * Overview:
858 * Uninitializes the bus-specific controller logic.
859 *
860 * Returns:
861 * None
862 *
863 * Arguments:
864 * dev - pointer to device information
865 *
866 * Functional Description:
867 * Perform bus-specific logic uninitialization.
868 *
869 * Return Codes:
870 * None
871 *
872 * Assumptions:
873 * bp->base has already been set with the proper
874 * base I/O address for this device.
875 *
876 * Side Effects:
877 * Interrupts are disabled at the adapter bus-specific logic.
878 */
879
880 static void dfx_bus_uninit(struct net_device *dev)
881 {
882 DFX_board_t *bp = netdev_priv(dev);
883 struct device *bdev = bp->bus_dev;
884 int dfx_bus_pci = dev_is_pci(bdev);
885 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
886 u8 val;
887
888 DBG_printk("In dfx_bus_uninit...\n");
889
890 /* Uninitialize adapter based on bus type */
891
892 if (dfx_bus_eisa) {
893 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
894
895 /* Disable interrupts at EISA bus interface chip (ESIC) */
896 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
897 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
898 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
899
900 /* Disable the board. */
901 outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
902
903 /* Disable memory and port decoders. */
904 outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
905 }
906 if (dfx_bus_pci) {
907 /* Disable interrupts at PCI bus interface chip (PFI) */
908 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
909 }
910 }
911
912
913 /*
914 * ========================
915 * = dfx_bus_config_check =
916 * ========================
917 *
918 * Overview:
919 * Checks the configuration (burst size, full-duplex, etc.). If any parameters
920 * are illegal, then this routine will set new defaults.
921 *
922 * Returns:
923 * None
924 *
925 * Arguments:
926 * bp - pointer to board information
927 *
928 * Functional Description:
929 * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
930 * PDQ, and all FDDI PCI controllers, all values are legal.
931 *
932 * Return Codes:
933 * None
934 *
935 * Assumptions:
936 * dfx_adap_init has NOT been called yet so burst size and other items have
937 * not been set.
938 *
939 * Side Effects:
940 * None
941 */
942
943 static void dfx_bus_config_check(DFX_board_t *bp)
944 {
945 struct device __maybe_unused *bdev = bp->bus_dev;
946 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
947 int status; /* return code from adapter port control call */
948 u32 host_data; /* LW data returned from port control call */
949
950 DBG_printk("In dfx_bus_config_check...\n");
951
952 /* Configuration check only valid for EISA adapter */
953
954 if (dfx_bus_eisa) {
955 /*
956 * First check if revision 2 EISA controller. Rev. 1 cards used
957 * PDQ revision B, so no workaround needed in this case. Rev. 3
958 * cards used PDQ revision E, so no workaround needed in this
959 * case, either. Only Rev. 2 cards used either Rev. D or E
960 * chips, so we must verify the chip revision on Rev. 2 cards.
961 */
962 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
963 /*
964 * Revision 2 FDDI EISA controller found,
965 * so let's check PDQ revision of adapter.
966 */
967 status = dfx_hw_port_ctrl_req(bp,
968 PI_PCTRL_M_SUB_CMD,
969 PI_SUB_CMD_K_PDQ_REV_GET,
970 0,
971 &host_data);
972 if ((status != DFX_K_SUCCESS) || (host_data == 2))
973 {
974 /*
975 * Either we couldn't determine the PDQ revision, or
976 * we determined that it is at revision D. In either case,
977 * we need to implement the workaround.
978 */
979
980 /* Ensure that the burst size is set to 8 longwords or less */
981
982 switch (bp->burst_size)
983 {
984 case PI_PDATA_B_DMA_BURST_SIZE_32:
985 case PI_PDATA_B_DMA_BURST_SIZE_16:
986 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
987 break;
988
989 default:
990 break;
991 }
992
993 /* Ensure that full-duplex mode is not enabled */
994
995 bp->full_duplex_enb = PI_SNMP_K_FALSE;
996 }
997 }
998 }
999 }
1000
1001
1002 /*
1003 * ===================
1004 * = dfx_driver_init =
1005 * ===================
1006 *
1007 * Overview:
1008 * Initializes remaining adapter board structure information
1009 * and makes sure adapter is in a safe state prior to dfx_open().
1010 *
1011 * Returns:
1012 * Condition code
1013 *
1014 * Arguments:
1015 * dev - pointer to device information
1016 * print_name - printable device name
1017 *
1018 * Functional Description:
1019 * This function allocates additional resources such as the host memory
1020 * blocks needed by the adapter (e.g. descriptor and consumer blocks).
1021 * Remaining bus initialization steps are also completed. The adapter
1022 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
1023 * must call dfx_open() to open the adapter and bring it on-line.
1024 *
1025 * Return Codes:
1026 * DFX_K_SUCCESS - initialization succeeded
1027 * DFX_K_FAILURE - initialization failed - could not allocate memory
1028 * or read adapter MAC address
1029 *
1030 * Assumptions:
1031 * Memory allocated from dma_alloc_coherent() call is physically
1032 * contiguous, locked memory.
1033 *
1034 * Side Effects:
1035 * Adapter is reset and should be in DMA_UNAVAILABLE state before
1036 * returning from this routine.
1037 */
1038
1039 static int dfx_driver_init(struct net_device *dev, const char *print_name,
1040 resource_size_t bar_start)
1041 {
1042 DFX_board_t *bp = netdev_priv(dev);
1043 struct device *bdev = bp->bus_dev;
1044 int dfx_bus_pci = dev_is_pci(bdev);
1045 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1046 int dfx_bus_tc = DFX_BUS_TC(bdev);
1047 int alloc_size; /* total buffer size needed */
1048 char *top_v, *curr_v; /* virtual addrs into memory block */
1049 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
1050 u32 data; /* host data register value */
1051 __le32 le32;
1052 char *board_name = NULL;
1053
1054 DBG_printk("In dfx_driver_init...\n");
1055
1056 /* Initialize bus-specific hardware registers */
1057
1058 dfx_bus_init(dev);
1059
1060 /*
1061 * Initialize default values for configurable parameters
1062 *
1063 * Note: All of these parameters are ones that a user may
1064 * want to customize. It'd be nice to break these
1065 * out into Space.c or someplace else that's more
1066 * accessible/understandable than this file.
1067 */
1068
1069 bp->full_duplex_enb = PI_SNMP_K_FALSE;
1070 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
1071 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
1072 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
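	/*
	 * The requested TTRT is expressed in 80 ns units:
	 * 8 ms / 80 ns = 100000 = 8 * 12500, hence the bp->req_ttrt
	 * value above.
	 */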
1073
1074 /*
1075 * Ensure that HW configuration is OK
1076 *
1077 * Note: Depending on the hardware revision, we may need to modify
1078 * some of the configurable parameters to workaround hardware
1079 * limitations. We'll perform this configuration check AFTER
1080 * setting the parameters to their default values.
1081 */
1082
1083 dfx_bus_config_check(bp);
1084
1085 /* Disable PDQ interrupts first */
1086
1087 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1088
1089 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1090
1091 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1092
1093 /* Read the factory MAC address from the adapter then save it */
1094
1095 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1096 &data) != DFX_K_SUCCESS) {
1097 printk("%s: Could not read adapter factory MAC address!\n",
1098 print_name);
1099 return DFX_K_FAILURE;
1100 }
1101 le32 = cpu_to_le32(data);
1102 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1103
1104 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1105 &data) != DFX_K_SUCCESS) {
1106 printk("%s: Could not read adapter factory MAC address!\n",
1107 print_name);
1108 return DFX_K_FAILURE;
1109 }
1110 le32 = cpu_to_le32(data);
1111 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1112
1113 /*
1114 * Set current address to factory address
1115 *
1116 * Note: Node address override support is handled through
1117 * dfx_ctl_set_mac_address.
1118 */
1119
1120 dev_addr_set(dev, bp->factory_mac_addr);
1121 if (dfx_bus_tc)
1122 board_name = "DEFTA";
1123 if (dfx_bus_eisa)
1124 board_name = "DEFEA";
1125 if (dfx_bus_pci)
1126 board_name = "DEFPA";
1127 pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1128 print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
1129 (long long)bar_start, dev->irq, dev->dev_addr);
1130
1131 /*
1132 * Get memory for descriptor block, consumer block, and other buffers
1133 * that need to be DMA read or written to by the adapter.
1134 */
1135
1136 alloc_size = sizeof(PI_DESCR_BLOCK) +
1137 PI_CMD_REQ_K_SIZE_MAX +
1138 PI_CMD_RSP_K_SIZE_MAX +
1139 #ifndef DYNAMIC_BUFFERS
1140 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1141 #endif
1142 sizeof(PI_CONSUMER_BLOCK) +
1143 (PI_ALIGN_K_DESC_BLK - 1);
1144 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1145 &bp->kmalloced_dma,
1146 GFP_ATOMIC);
1147 if (top_v == NULL)
1148 return DFX_K_FAILURE;
1149
1150 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1151
1152 /*
1153 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
1154 * plus the amount of memory needed was allocated. The physical address
1155 * is now 8K aligned. By carving up the memory in a specific order,
1156 * we'll guarantee the alignment requirements for all other structures.
1157 *
1158 * Note: If the assumptions change regarding the non-paged, non-cached,
1159 * physically contiguous nature of the memory block or the address
1160 * alignments, then we'll need to implement a different algorithm
1161 * for allocating the needed memory.
1162 */
1163
1164 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1165 curr_v = top_v + (curr_p - top_p);
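	/*
	 * For instance (addresses purely illustrative): with top_p at
	 * 0x12345678 and PI_ALIGN_K_DESC_BLK at 8K (0x2000), curr_p rounds
	 * up to 0x12346000 and curr_v advances by the same 0x988 bytes.
	 */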
1166
1167 /* Reserve space for descriptor block */
1168
1169 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1170 bp->descr_block_phys = curr_p;
1171 curr_v += sizeof(PI_DESCR_BLOCK);
1172 curr_p += sizeof(PI_DESCR_BLOCK);
1173
1174 /* Reserve space for command request buffer */
1175
1176 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1177 bp->cmd_req_phys = curr_p;
1178 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1179 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1180
1181 /* Reserve space for command response buffer */
1182
1183 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1184 bp->cmd_rsp_phys = curr_p;
1185 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1186 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1187
1188 /* Reserve space for the LLC host receive queue buffers */
1189
1190 bp->rcv_block_virt = curr_v;
1191 bp->rcv_block_phys = curr_p;
1192
1193 #ifndef DYNAMIC_BUFFERS
1194 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1195 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1196 #endif
1197
1198 /* Reserve space for the consumer block */
1199
1200 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1201 bp->cons_block_phys = curr_p;
1202
1203 /* Display virtual and physical addresses if debug driver */
1204
1205 DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
1206 print_name, bp->descr_block_virt, &bp->descr_block_phys);
1207 DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
1208 print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
1209 DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
1210 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
1211 DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
1212 print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
1213 DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
1214 print_name, bp->cons_block_virt, &bp->cons_block_phys);
1215
1216 return DFX_K_SUCCESS;
1217 }
1218
1219
1220 /*
1221 * =================
1222 * = dfx_adap_init =
1223 * =================
1224 *
1225 * Overview:
1226 * Brings the adapter to the link avail/link unavailable state.
1227 *
1228 * Returns:
1229 * Condition code
1230 *
1231 * Arguments:
1232 * bp - pointer to board information
1233 * get_buffers - non-zero if buffers to be allocated
1234 *
1235 * Functional Description:
1236 * Issues the low-level firmware/hardware calls necessary to bring
1237 * the adapter up, or to properly reset and restore adapter during
1238 * run-time.
1239 *
1240 * Return Codes:
1241 * DFX_K_SUCCESS - Adapter brought up successfully
1242 * DFX_K_FAILURE - Adapter initialization failed
1243 *
1244 * Assumptions:
1245 * bp->reset_type should be set to a valid reset type value before
1246 * calling this routine.
1247 *
1248 * Side Effects:
1249 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1250 * upon a successful return of this routine.
1251 */
1252
1253 static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1254 {
1255 DBG_printk("In dfx_adap_init...\n");
1256
1257 /* Disable PDQ interrupts first */
1258
1259 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1260
1261 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1262
1263 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1264 {
1265 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1266 return DFX_K_FAILURE;
1267 }
1268
1269 /*
1270 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1271 * so we'll acknowledge all Type 0 interrupts now before continuing.
1272 */
1273
1274 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1275
1276 /*
1277 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1278 *
1279 * Note: We only need to clear host copies of these registers. The PDQ reset
1280 * takes care of the on-board register values.
1281 */
1282
1283 bp->cmd_req_reg.lword = 0;
1284 bp->cmd_rsp_reg.lword = 0;
1285 bp->rcv_xmt_reg.lword = 0;
1286
1287 /* Clear consumer block before going to DMA_AVAILABLE state */
1288
1289 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1290
1291 /* Initialize the DMA Burst Size */
1292
1293 if (dfx_hw_port_ctrl_req(bp,
1294 PI_PCTRL_M_SUB_CMD,
1295 PI_SUB_CMD_K_BURST_SIZE_SET,
1296 bp->burst_size,
1297 NULL) != DFX_K_SUCCESS)
1298 {
1299 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1300 return DFX_K_FAILURE;
1301 }
1302
1303 /*
1304 * Set base address of Consumer Block
1305 *
1306 * Assumption: 32-bit physical address of consumer block is 64 byte
1307 * aligned. That is, bits 0-5 of the address must be zero.
1308 */
1309
1310 if (dfx_hw_port_ctrl_req(bp,
1311 PI_PCTRL_M_CONS_BLOCK,
1312 bp->cons_block_phys,
1313 0,
1314 NULL) != DFX_K_SUCCESS)
1315 {
1316 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1317 return DFX_K_FAILURE;
1318 }
1319
1320 /*
1321 * Set the base address of Descriptor Block and bring adapter
1322 * to DMA_AVAILABLE state.
1323 *
1324 * Note: We also set the literal and data swapping requirements
1325 * in this command.
1326 *
1327 * Assumption: 32-bit physical address of descriptor block
1328 * is 8Kbyte aligned.
1329 */
1330 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1331 (u32)(bp->descr_block_phys |
1332 PI_PDATA_A_INIT_M_BSWAP_INIT),
1333 0, NULL) != DFX_K_SUCCESS) {
1334 printk("%s: Could not set descriptor block address!\n",
1335 bp->dev->name);
1336 return DFX_K_FAILURE;
1337 }
1338
1339 /* Set transmit flush timeout value */
1340
1341 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1342 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1343 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
1344 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1345 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1346 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1347 {
1348 printk("%s: DMA command request failed!\n", bp->dev->name);
1349 return DFX_K_FAILURE;
1350 }
1351
1352 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
1353
1354 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1355 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1356 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1357 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1358 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1359 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1360 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1361 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1362 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1363 {
1364 printk("%s: DMA command request failed!\n", bp->dev->name);
1365 return DFX_K_FAILURE;
1366 }
1367
1368 /* Initialize adapter CAM */
1369
1370 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1371 {
1372 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1373 return DFX_K_FAILURE;
1374 }
1375
1376 /* Initialize adapter filters */
1377
1378 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1379 {
1380 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1381 return DFX_K_FAILURE;
1382 }
1383
1384 /*
1385 * Remove any existing dynamic buffers (i.e. if the adapter is being
1386 * reinitialized)
1387 */
1388
1389 if (get_buffers)
1390 dfx_rcv_flush(bp);
1391
1392 /* Initialize receive descriptor block and produce buffers */
1393
1394 if (dfx_rcv_init(bp, get_buffers))
1395 {
1396 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1397 if (get_buffers)
1398 dfx_rcv_flush(bp);
1399 return DFX_K_FAILURE;
1400 }
1401
1402 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1403
1404 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1405 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1406 {
1407 printk("%s: Start command failed\n", bp->dev->name);
1408 if (get_buffers)
1409 dfx_rcv_flush(bp);
1410 return DFX_K_FAILURE;
1411 }
1412
1413 /* Initialization succeeded, reenable PDQ interrupts */
1414
1415 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1416 return DFX_K_SUCCESS;
1417 }
1418
1419
1420 /*
1421 * ============
1422 * = dfx_open =
1423 * ============
1424 *
1425 * Overview:
1426 * Opens the adapter
1427 *
1428 * Returns:
1429 * Condition code
1430 *
1431 * Arguments:
1432 * dev - pointer to device information
1433 *
1434 * Functional Description:
1435 * This function brings the adapter to an operational state.
1436 *
1437 * Return Codes:
1438 * 0 - Adapter was successfully opened
1439 * -EAGAIN - Adapter initialization failed (the error from request_irq() is returned if the IRQ could not be registered)
1440 *
1441 * Assumptions:
1442 * This routine should only be called for a device that was
1443 * initialized successfully.
1444 *
1445 * Side Effects:
1446 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1447 * if the open is successful.
1448 */
1449
1450 static int dfx_open(struct net_device *dev)
1451 {
1452 DFX_board_t *bp = netdev_priv(dev);
1453 int ret;
1454
1455 DBG_printk("In dfx_open...\n");
1456
1457 /* Register IRQ - support shared interrupts by passing device ptr */
1458
1459 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1460 dev);
1461 if (ret) {
1462 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1463 return ret;
1464 }
1465
1466 /*
1467 * Set current address to factory MAC address
1468 *
1469 * Note: We've already done this step in dfx_driver_init.
1470 * However, it's possible that a user has set a node
1471 * address override, then closed and reopened the
1472 * adapter. Unless we reset the device address field
1473 * now, we'll continue to use the existing modified
1474 * address.
1475 */
1476
1477 dev_addr_set(dev, bp->factory_mac_addr);
1478
1479 /* Clear local unicast/multicast address tables and counts */
1480
1481 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1482 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1483 bp->uc_count = 0;
1484 bp->mc_count = 0;
1485
1486 /* Disable promiscuous filter settings */
1487
1488 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1489 bp->group_prom = PI_FSTATE_K_BLOCK;
1490
1491 spin_lock_init(&bp->lock);
1492
1493 /* Reset and initialize adapter */
1494
1495 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
1496 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1497 {
1498 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1499 free_irq(dev->irq, dev);
1500 return -EAGAIN;
1501 }
1502
1503 /* Set device structure info */
1504 netif_start_queue(dev);
1505 return 0;
1506 }
1507
1508
1509 /*
1510 * =============
1511 * = dfx_close =
1512 * =============
1513 *
1514 * Overview:
1515 * Closes the device/module.
1516 *
1517 * Returns:
1518 * Condition code
1519 *
1520 * Arguments:
1521 * dev - pointer to device information
1522 *
1523 * Functional Description:
1524 * This routine closes the adapter and brings it to a safe state.
1525 * The interrupt service routine is deregistered with the OS.
1526 * The adapter can be opened again with another call to dfx_open().
1527 *
1528 * Return Codes:
1529 * Always return 0.
1530 *
1531 * Assumptions:
1532 * No further requests for this adapter are made after this routine is
1533 * called. dfx_open() can be called to reset and reinitialize the
1534 * adapter.
1535 *
1536 * Side Effects:
1537 * Adapter should be in DMA_UNAVAILABLE state upon completion of this
1538 * routine.
1539 */
1540
1541 static int dfx_close(struct net_device *dev)
1542 {
1543 DFX_board_t *bp = netdev_priv(dev);
1544
1545 DBG_printk("In dfx_close...\n");
1546
1547 /* Disable PDQ interrupts first */
1548
1549 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1550
1551 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1552
1553 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1554
1555 /*
1556 * Flush any pending transmit buffers
1557 *
1558 * Note: It's important that we flush the transmit buffers
1559 * BEFORE we clear our copy of the Type 2 register.
1560 * Otherwise, we'll have no idea how many buffers
1561 * we need to free.
1562 */
1563
1564 dfx_xmt_flush(bp);
1565
1566 /*
1567 * Clear Type 1 and Type 2 registers after adapter reset
1568 *
1569 * Note: Even though we're closing the adapter, it's
1570 * possible that an interrupt will occur after
1571 * dfx_close is called. Without some assurance to
1572 * the contrary we want to make sure that we don't
1573 * process receive and transmit LLC frames and update
1574 * the Type 2 register with bad information.
1575 */
1576
1577 bp->cmd_req_reg.lword = 0;
1578 bp->cmd_rsp_reg.lword = 0;
1579 bp->rcv_xmt_reg.lword = 0;
1580
1581 /* Clear consumer block for the same reason given above */
1582
1583 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1584
1585 /* Release all dynamically allocated skbs in the receive ring. */
1586
1587 dfx_rcv_flush(bp);
1588
1589 /* Clear device structure flags */
1590
1591 netif_stop_queue(dev);
1592
1593 /* Deregister (free) IRQ */
1594
1595 free_irq(dev->irq, dev);
1596
1597 return 0;
1598 }
1599
1600
1601 /*
1602 * ======================
1603 * = dfx_int_pr_halt_id =
1604 * ======================
1605 *
1606 * Overview:
1607 * Displays halt id's in string form.
1608 *
1609 * Returns:
1610 * None
1611 *
1612 * Arguments:
1613 * bp - pointer to board information
1614 *
1615 * Functional Description:
1616 * Determine current halt id and display appropriate string.
1617 *
1618 * Return Codes:
1619 * None
1620 *
1621 * Assumptions:
1622 * None
1623 *
1624 * Side Effects:
1625 * None
1626 */
1627
1628 static void dfx_int_pr_halt_id(DFX_board_t *bp)
1629 {
1630 PI_UINT32 port_status; /* PDQ port status register value */
1631 PI_UINT32 halt_id; /* PDQ port status halt ID */
1632
1633 /* Read the latest port status */
1634
1635 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1636
1637 /* Display halt state transition information */
1638
1639 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1640 switch (halt_id)
1641 {
1642 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1643 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1644 break;
1645
1646 case PI_HALT_ID_K_PARITY_ERROR:
1647 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1648 break;
1649
1650 case PI_HALT_ID_K_HOST_DIR_HALT:
1651 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1652 break;
1653
1654 case PI_HALT_ID_K_SW_FAULT:
1655 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1656 break;
1657
1658 case PI_HALT_ID_K_HW_FAULT:
1659 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1660 break;
1661
1662 case PI_HALT_ID_K_PC_TRACE:
1663 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1664 break;
1665
1666 case PI_HALT_ID_K_DMA_ERROR:
1667 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1668 break;
1669
1670 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1671 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1672 break;
1673
1674 case PI_HALT_ID_K_BUS_EXCEPTION:
1675 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1676 break;
1677
1678 default:
1679 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1680 break;
1681 }
1682 }
1683
1684
1685 /*
1686 * ==========================
1687 * = dfx_int_type_0_process =
1688 * ==========================
1689 *
1690 * Overview:
1691 * Processes Type 0 interrupts.
1692 *
1693 * Returns:
1694 * None
1695 *
1696 * Arguments:
1697 * bp - pointer to board information
1698 *
1699 * Functional Description:
1700 * Processes all enabled Type 0 interrupts. If the reason for the interrupt
1701 * is a serious fault on the adapter, then an error message is displayed
1702 * and the adapter is reset.
1703 *
1704 * One tricky potential timing window is the rapid succession of "link avail"
1705 * "link unavail" state change interrupts. The acknowledgement of the Type 0
1706 * interrupt must be done before reading the state from the Port Status
1707 * register. This is true because a state change could occur after reading
1708 * the data, but before acknowledging the interrupt. If this state change
1709 * does happen, it would be lost because the driver is using the old state,
1710 * and it will never know about the new state because it subsequently
1711 * acknowledges the state change interrupt.
1712 *
1713 * INCORRECT CORRECT
1714 * read type 0 int reasons read type 0 int reasons
1715 * read adapter state ack type 0 interrupts
1716 * ack type 0 interrupts read adapter state
1717 * ... process interrupt ... ... process interrupt ...
1718 *
1719 * Return Codes:
1720 * None
1721 *
1722 * Assumptions:
1723 * None
1724 *
1725 * Side Effects:
1726 * An adapter reset may occur if the adapter has any Type 0 error interrupts
1727 * or if the port status indicates that the adapter is halted. The driver
1728 * is responsible for reinitializing the adapter with the current CAM
1729 * contents and adapter filter settings.
1730 */
1731
1732 static void dfx_int_type_0_process(DFX_board_t *bp)
1733
1734 {
1735 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
1736 PI_UINT32 state; /* current adap state (from port status) */
1737
1738 /*
1739 * Read host interrupt Type 0 register to determine which Type 0
1740 * interrupts are pending. Immediately write it back out to clear
1741 * those interrupts.
1742 */
1743
1744 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1745 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1746
1747 /* Check for Type 0 error interrupts */
1748
1749 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1750 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1751 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1752 {
1753 /* Check for Non-Existent Memory error */
1754
1755 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1756 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1757
1758 /* Check for Packet Memory Parity error */
1759
1760 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1761 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1762
1763 /* Check for Host Bus Parity error */
1764
1765 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1766 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1767
1768 /* Reset adapter and bring it back on-line */
1769
1770 bp->link_available = PI_K_FALSE; /* link is no longer available */
1771 bp->reset_type = 0; /* rerun on-board diagnostics */
1772 printk("%s: Resetting adapter...\n", bp->dev->name);
1773 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1774 {
1775 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1776 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1777 return;
1778 }
1779 printk("%s: Adapter reset successful!\n", bp->dev->name);
1780 return;
1781 }
1782
1783 /* Check for transmit flush interrupt */
1784
1785 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1786 {
1787 /* Flush any pending xmt's and acknowledge the flush interrupt */
1788
1789 bp->link_available = PI_K_FALSE; /* link is no longer available */
1790 dfx_xmt_flush(bp); /* flush any outstanding packets */
1791 (void) dfx_hw_port_ctrl_req(bp,
1792 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1793 0,
1794 0,
1795 NULL);
1796 }
1797
1798 /* Check for adapter state change */
1799
1800 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1801 {
1802 /* Get latest adapter state */
1803
1804 state = dfx_hw_adap_state_rd(bp); /* get adapter state */
1805 if (state == PI_STATE_K_HALTED)
1806 {
1807 /*
1808 * Adapter has transitioned to HALTED state, try to reset
1809 * adapter to bring it back on-line. If reset fails,
1810 * leave the adapter in the broken state.
1811 */
1812
1813 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1814 dfx_int_pr_halt_id(bp); /* display halt id as string */
1815
1816 /* Reset adapter and bring it back on-line */
1817
1818 bp->link_available = PI_K_FALSE; /* link is no longer available */
1819 bp->reset_type = 0; /* rerun on-board diagnostics */
1820 printk("%s: Resetting adapter...\n", bp->dev->name);
1821 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1822 {
1823 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1824 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1825 return;
1826 }
1827 printk("%s: Adapter reset successful!\n", bp->dev->name);
1828 }
1829 else if (state == PI_STATE_K_LINK_AVAIL)
1830 {
1831 bp->link_available = PI_K_TRUE; /* set link available flag */
1832 }
1833 }
1834 }
1835
1836
1837 /*
1838 * ==================
1839 * = dfx_int_common =
1840 * ==================
1841 *
1842 * Overview:
1843 * Interrupt service routine (ISR)
1844 *
1845 * Returns:
1846 * None
1847 *
1848 * Arguments:
1849 * bp - pointer to board information
1850 *
1851 * Functional Description:
1852 * This is the ISR which processes incoming adapter interrupts.
1853 *
1854 * Return Codes:
1855 * None
1856 *
1857 * Assumptions:
1858 * This routine assumes PDQ interrupts have not been disabled.
1859 * When interrupts are disabled at the PDQ, the Port Status register
1860 * is automatically cleared. This routine uses the Port Status
1861 * register value to determine whether a Type 0 interrupt occurred,
1862 * so it's important that adapter interrupts are not normally
1863 * enabled/disabled at the PDQ.
1864 *
1865 * It's vital that this routine is NOT reentered for the
1866 * same board and that the OS is not in another section of
1867 * code (eg. dfx_xmt_queue_pkt) for the same board on a
1868 * different thread.
1869 *
1870 * Side Effects:
1871 * Pending interrupts are serviced. Depending on the type of
1872 * interrupt, acknowledging and clearing the interrupt at the
1873 * PDQ involves writing a register to clear the interrupt bit
1874 * or updating completion indices.
1875 */
1876
1877 static void dfx_int_common(struct net_device *dev)
1878 {
1879 DFX_board_t *bp = netdev_priv(dev);
1880 PI_UINT32 port_status; /* Port Status register */
1881
1882 /* Process xmt interrupts - frequent case, so always call this routine */
1883
1884 if(dfx_xmt_done(bp)) /* free consumed xmt packets */
1885 netif_wake_queue(dev);
1886
1887 /* Process rcv interrupts - frequent case, so always call this routine */
1888
1889 dfx_rcv_queue_process(bp); /* service received LLC frames */
1890
1891 /*
1892 * Transmit and receive producer and completion indices are updated on the
1893 * adapter by writing to the Type 2 Producer register. Since the frequent
1894 * case is that we'll be processing either LLC transmit or receive buffers,
1895 * we'll optimize I/O writes by doing a single register write here.
1896 */
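/*
 * Note: rcv_xmt_reg is referenced throughout this file both as a single
 * 32-bit image (lword) and as its individual receive/transmit producer
 * and completion indices (index.rcv_prod, index.rcv_comp, index.xmt_prod,
 * index.xmt_comp); that overlay is what makes the single register write
 * below possible.
 */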
1897
1898 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1899
1900 /* Read PDQ Port Status register to find out which interrupts need processing */
1901
1902 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1903
1904 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1905
1906 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1907 dfx_int_type_0_process(bp); /* process Type 0 interrupts */
1908 }
1909
1910
1911 /*
1912 * =================
1913 * = dfx_interrupt =
1914 * =================
1915 *
1916 * Overview:
1917 * Interrupt processing routine
1918 *
1919 * Returns:
1920 * Whether a valid interrupt was seen.
1921 *
1922 * Arguments:
1923 * irq - interrupt vector
1924 * dev_id - pointer to device information
1925 *
1926 * Functional Description:
1927 * This routine calls the interrupt processing routine for this adapter. It
1928 * disables and reenables adapter interrupts, as appropriate. We can support
1929 * shared interrupts since the incoming dev_id pointer provides our device
1930 * structure context.
1931 *
1932 * Return Codes:
1933 * IRQ_HANDLED - an IRQ was handled.
1934 * IRQ_NONE - no IRQ was handled.
1935 *
1936 * Assumptions:
1937 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1938 * on Intel-based systems) is done by the operating system outside this
1939 * routine.
1940 *
1941 * System interrupts are enabled through this call.
1942 *
1943 * Side Effects:
1944 * Interrupts are disabled, then reenabled at the adapter.
1945 */
1946
1947 static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1948 {
1949 struct net_device *dev = dev_id;
1950 DFX_board_t *bp = netdev_priv(dev);
1951 struct device *bdev = bp->bus_dev;
1952 int dfx_bus_pci = dev_is_pci(bdev);
1953 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1954 int dfx_bus_tc = DFX_BUS_TC(bdev);
1955
1956 /* Service adapter interrupts */
1957
1958 if (dfx_bus_pci) {
1959 u32 status;
1960
1961 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1962 if (!(status & PFI_STATUS_M_PDQ_INT))
1963 return IRQ_NONE;
1964
1965 spin_lock(&bp->lock);
1966
1967 /* Disable PDQ-PFI interrupts at PFI */
1968 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1969 PFI_MODE_M_DMA_ENB);
1970
1971 /* Call interrupt service routine for this adapter */
1972 dfx_int_common(dev);
1973
1974 /* Clear PDQ interrupt status bit and reenable interrupts */
1975 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1976 PFI_STATUS_M_PDQ_INT);
1977 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1978 (PFI_MODE_M_PDQ_INT_ENB |
1979 PFI_MODE_M_DMA_ENB));
1980
1981 spin_unlock(&bp->lock);
1982 }
1983 if (dfx_bus_eisa) {
1984 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1985 u8 status;
1986
1987 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1988 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1989 return IRQ_NONE;
1990
1991 spin_lock(&bp->lock);
1992
1993 /* Disable interrupts at the ESIC */
1994 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1995 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1996
1997 /* Call interrupt service routine for this adapter */
1998 dfx_int_common(dev);
1999
2000 /* Reenable interrupts at the ESIC */
2001 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2002 status |= PI_CONFIG_STAT_0_M_INT_ENB;
2003 outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
2004
2005 spin_unlock(&bp->lock);
2006 }
2007 if (dfx_bus_tc) {
2008 u32 status;
2009
2010 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
2011 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
2012 PI_PSTATUS_M_XMT_DATA_PENDING |
2013 PI_PSTATUS_M_SMT_HOST_PENDING |
2014 PI_PSTATUS_M_UNSOL_PENDING |
2015 PI_PSTATUS_M_CMD_RSP_PENDING |
2016 PI_PSTATUS_M_CMD_REQ_PENDING |
2017 PI_PSTATUS_M_TYPE_0_PENDING)))
2018 return IRQ_NONE;
2019
2020 spin_lock(&bp->lock);
2021
2022 /* Call interrupt service routine for this adapter */
2023 dfx_int_common(dev);
2024
2025 spin_unlock(&bp->lock);
2026 }
2027
2028 return IRQ_HANDLED;
2029 }
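/*
 * Registration sketch (illustrative only, not the driver's actual
 * registration code): because the handler relies on dev_id to recover
 * its board context, it would be installed as a shared interrupt with
 * the net_device pointer as the cookie, along the lines of:
 *
 *	err = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *			  dev->name, dev);
 */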
2030
2031
2032 /*
2033 * =====================
2034 * = dfx_ctl_get_stats =
2035 * =====================
2036 *
2037 * Overview:
2038 * Get statistics for FDDI adapter
2039 *
2040 * Returns:
2041 * Pointer to FDDI statistics structure
2042 *
2043 * Arguments:
2044 * dev - pointer to device information
2045 *
2046 * Functional Description:
2047 * Gets current MIB objects from adapter, then
2048 * returns FDDI statistics structure as defined
2049 * in if_fddi.h.
2050 *
2051 * Note: Since the FDDI statistics structure is
2052 * still new and the device structure doesn't
2053 * have an FDDI-specific get statistics handler,
2054 * we'll return the FDDI statistics structure as
2055 * a pointer to an Ethernet statistics structure.
2056 * That way, at least the first part of the statistics
2057 * structure can be decoded properly, and it allows
2058 * "smart" applications to perform a second cast to
2059 * decode the FDDI-specific statistics.
2060 *
2061 * We'll have to pay attention to this routine as the
2062 * device structure becomes more mature and LAN media
2063 * independent.
2064 *
2065 * Return Codes:
2066 * None
2067 *
2068 * Assumptions:
2069 * None
2070 *
2071 * Side Effects:
2072 * None
2073 */
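/*
 * Sketch of the "second cast" mentioned above (illustrative; not a call
 * path in this file): a consumer that knows the device is FDDI can
 * recover the full structure, as defined in if_fddi.h, from the generic
 * pointer returned here:
 *
 *	struct net_device_stats *ns = dfx_ctl_get_stats(dev);
 *	struct fddi_statistics *fs = (struct fddi_statistics *)ns;
 *
 * and then read the SMT/MAC/PORT fields filled in below.
 */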
2074
2075 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2076 {
2077 DFX_board_t *bp = netdev_priv(dev);
2078
2079 /* Fill the bp->stats structure with driver-maintained counters */
2080
2081 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2082 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2083 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2084 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2085 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2086 bp->rcv_frame_status_errors +
2087 bp->rcv_length_errors;
2088 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2089 bp->stats.gen.rx_dropped = bp->rcv_discards;
2090 bp->stats.gen.tx_dropped = bp->xmt_discards;
2091 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2092 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
2093
2094 /* Get FDDI SMT MIB objects */
2095
2096 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2097 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2098 return (struct net_device_stats *)&bp->stats;
2099
2100 /* Fill the bp->stats structure with the SMT MIB object values */
2101
2102 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2103 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2104 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2105 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2106 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2107 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2108 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2109 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2110 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2111 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2112 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2113 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2114 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2115 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2116 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2117 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2118 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2119 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2120 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2121 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2122 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2123 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2124 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2125 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2126 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2127 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2128 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2129 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2130 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2131 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2132 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2133 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2134 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2135 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2136 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2137 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2138 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2139 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2140 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2141 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2142 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2143 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2144 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2145 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2146 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2147 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2148 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2149 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2150 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2151 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2152 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2153 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2154 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2155 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2156 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2157 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2158 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2159 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2160 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2161 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2162 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2163 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2164 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2165 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2166 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2167 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2168 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2169 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2170 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2171 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2172 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2173 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2174 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2175 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2176 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2177 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2178 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2179 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2180 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2181 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2182 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2183 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2184 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2185 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2186 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2187 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2188 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2189 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2190 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2191 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2192 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2193 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2194
2195 /* Get FDDI counters */
2196
2197 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2198 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2199 return (struct net_device_stats *)&bp->stats;
2200
2201 /* Fill the bp->stats structure with the FDDI counter values */
2202
2203 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2204 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2205 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2206 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2207 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2208 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2209 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2210 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2211 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2212 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2213 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2214
2215 return (struct net_device_stats *)&bp->stats;
2216 }
2217
2218
2219 /*
2220 * ==============================
2221 * = dfx_ctl_set_multicast_list =
2222 * ==============================
2223 *
2224 * Overview:
2225 * Enable/Disable LLC frame promiscuous mode reception
2226 * on the adapter and/or update multicast address table.
2227 *
2228 * Returns:
2229 * None
2230 *
2231 * Arguments:
2232 * dev - pointer to device information
2233 *
2234 * Functional Description:
2235 * This routine follows a fairly simple algorithm for setting the
2236 * adapter filters and CAM:
2237 *
2238 * if IFF_PROMISC flag is set
2239 * enable LLC individual/group promiscuous mode
2240 * else
2241 * disable LLC individual/group promiscuous mode
2242 * if number of incoming multicast addresses >
2243 * (CAM max size - number of unicast addresses in CAM)
2244 * enable LLC group promiscuous mode
2245 * set driver-maintained multicast address count to zero
2246 * else
2247 * disable LLC group promiscuous mode
2248 * set driver-maintained multicast address count to incoming count
2249 * update adapter CAM
2250 * update adapter filters
2251 *
2252 * Return Codes:
2253 * None
2254 *
2255 * Assumptions:
2256 * Multicast addresses are presented in canonical (LSB) format.
2257 *
2258 * Side Effects:
2259 * On-board adapter CAM and filters are updated.
2260 */
2261
2262 static void dfx_ctl_set_multicast_list(struct net_device *dev)
2263 {
2264 DFX_board_t *bp = netdev_priv(dev);
2265 int i; /* used as index in for loop */
2266 struct netdev_hw_addr *ha;
2267
2268 /* Enable LLC frame promiscuous mode, if necessary */
2269
2270 if (dev->flags & IFF_PROMISC)
2271 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
2272
2273 /* Else, update multicast address table */
2274
2275 else
2276 {
2277 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
2278 /*
2279 * Check whether incoming multicast address count exceeds table size
2280 *
2281 * Note: The adapters utilize an on-board 64 entry CAM for
2282 * supporting perfect filtering of multicast packets
2283 * and bridge functions when adding unicast addresses.
2284 * There is no hash function available. To support
2285 * additional multicast addresses, the all multicast
2286 * filter (LLC group promiscuous mode) must be enabled.
2287 *
2288 * The firmware reserves two CAM entries for SMT-related
2289 * multicast addresses, which leaves 62 entries available.
2290 * The following code ensures that we're not being asked
2291 * to add more than 62 addresses to the CAM. If we are,
2292 * the driver will enable the all multicast filter.
2293 * Should the number of multicast addresses drop below
2294 * the high water mark, the filter will be disabled and
2295 * perfect filtering will be used.
2296 */
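/*
 * Worked example (assuming PI_CMD_ADDR_FILTER_K_SIZE reflects the 62
 * usable CAM entries described above): with one unicast override in
 * the CAM (uc_count == 1), up to 61 multicast addresses can be
 * perfect filtered; a 62nd address exceeds the threshold below and
 * flips group_prom to PI_FSTATE_K_PASS.
 */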
2297
2298 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2299 {
2300 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2301 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2302 }
2303 else
2304 {
2305 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2306 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
2307 }
2308
2309 /* Copy addresses to multicast address table, then update adapter CAM */
2310
2311 i = 0;
2312 netdev_for_each_mc_addr(ha, dev)
2313 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2314 ha->addr, FDDI_K_ALEN);
2315
2316 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2317 {
2318 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2319 }
2320 else
2321 {
2322 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2323 }
2324 }
2325
2326 /* Update adapter filters */
2327
2328 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2329 {
2330 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2331 }
2332 else
2333 {
2334 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2335 }
2336 }
2337
2338
2339 /*
2340 * ===========================
2341 * = dfx_ctl_set_mac_address =
2342 * ===========================
2343 *
2344 * Overview:
2345 * Add node address override (unicast address) to adapter
2346 * CAM and update dev_addr field in device table.
2347 *
2348 * Returns:
2349 * None
2350 *
2351 * Arguments:
2352 * dev - pointer to device information
2353 * addr - pointer to sockaddr structure containing unicast address to add
2354 *
2355 * Functional Description:
2356 * The adapter supports node address overrides by adding one or more
2357 * unicast addresses to the adapter CAM. This is similar to adding
2358 * multicast addresses. In this routine we'll update the driver and
2359 * device structures with the new address, then update the adapter CAM
2360 * to ensure that the adapter will copy and strip frames destined and
2361 * sourced by that address.
2362 *
2363 * Return Codes:
2364 * Always returns zero.
2365 *
2366 * Assumptions:
2367 * The address pointed to by addr->sa_data is a valid unicast
2368 * address and is presented in canonical (LSB) format.
2369 *
2370 * Side Effects:
2371 * On-board adapter CAM is updated. On-board adapter filters
2372 * may be updated.
2373 */
2374
2375 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2376 {
2377 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2378 DFX_board_t *bp = netdev_priv(dev);
2379
2380 /* Copy unicast address to driver-maintained structs and update count */
2381
2382 dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */
2383 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
2384 bp->uc_count = 1;
2385
2386 /*
2387 * Verify we're not exceeding the CAM size by adding unicast address
2388 *
2389 * Note: It's possible that before entering this routine we've
2390 * already filled the CAM with 62 multicast addresses.
2391 * Since we need to place the node address override into
2392 * the CAM, we have to check to see that we're not
2393 * exceeding the CAM size. If we are, we have to enable
2394 * the LLC group (multicast) promiscuous mode filter as
2395 * in dfx_ctl_set_multicast_list.
2396 */
2397
2398 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2399 {
2400 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2401 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2402
2403 /* Update adapter filters */
2404
2405 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2406 {
2407 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2408 }
2409 else
2410 {
2411 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2412 }
2413 }
2414
2415 /* Update adapter CAM with new unicast address */
2416
2417 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2418 {
2419 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2420 }
2421 else
2422 {
2423 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2424 }
2425 return 0; /* always return zero */
2426 }
2427
2428
2429 /*
2430 * ======================
2431 * = dfx_ctl_update_cam =
2432 * ======================
2433 *
2434 * Overview:
2435 * Procedure to update adapter CAM (Content Addressable Memory)
2436 * with desired unicast and multicast address entries.
2437 *
2438 * Returns:
2439 * Condition code
2440 *
2441 * Arguments:
2442 * bp - pointer to board information
2443 *
2444 * Functional Description:
2445 * Updates adapter CAM with current contents of board structure
2446 * unicast and multicast address tables. Since there are only 62
2447 * free entries in CAM, this routine ensures that the command
2448 * request buffer is not overrun.
2449 *
2450 * Return Codes:
2451 * DFX_K_SUCCESS - Request succeeded
2452 * DFX_K_FAILURE - Request failed
2453 *
2454 * Assumptions:
2455 * All addresses being added (unicast and multicast) are in canonical
2456 * order.
2457 *
2458 * Side Effects:
2459 * On-board adapter CAM is updated.
2460 */
2461
2462 static int dfx_ctl_update_cam(DFX_board_t *bp)
2463 {
2464 int i; /* used as index */
2465 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
2466
2467 /*
2468 * Fill in command request information
2469 *
2470 * Note: Even though both the unicast and multicast address
2471 * table entries are stored as contiguous 6 byte entries,
2472 * the firmware address filter set command expects each
2473 * entry to be two longwords (8 bytes total). We must be
2474 * careful to only copy the six bytes of each unicast and
2475 * multicast table entry into each command entry. This
2476 * is also why we must first clear the entire command
2477 * request buffer.
2478 */
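/*
 * Layout sketch (illustrative address bytes): each entry[] slot spans
 * two longwords, but only its first six bytes are written; bytes 6-7
 * remain zero courtesy of the memset below:
 *
 *	entry[0]: aa aa aa aa aa aa | 00 00
 *	entry[1]: bb bb bb bb bb bb | 00 00
 */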
2479
2480 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
2481 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2482 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2483
2484 /* Now add unicast addresses to command request buffer, if any */
2485
2486 for (i=0; i < (int)bp->uc_count; i++)
2487 {
2488 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2489 {
2490 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2491 p_addr++; /* point to next command entry */
2492 }
2493 }
2494
2495 /* Now add multicast addresses to command request buffer, if any */
2496
2497 for (i=0; i < (int)bp->mc_count; i++)
2498 {
2499 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2500 {
2501 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2502 p_addr++; /* point to next command entry */
2503 }
2504 }
2505
2506 /* Issue command to update adapter CAM, then return */
2507
2508 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2509 return DFX_K_FAILURE;
2510 return DFX_K_SUCCESS;
2511 }
2512
2513
2514 /*
2515 * ==========================
2516 * = dfx_ctl_update_filters =
2517 * ==========================
2518 *
2519 * Overview:
2520 * Procedure to update adapter filters with desired
2521 * filter settings.
2522 *
2523 * Returns:
2524 * Condition code
2525 *
2526 * Arguments:
2527 * bp - pointer to board information
2528 *
2529 * Functional Description:
2530 * Enables or disables filter using current filter settings.
2531 *
2532 * Return Codes:
2533 * DFX_K_SUCCESS - Request succeeded.
2534 * DFX_K_FAILURE - Request failed.
2535 *
2536 * Assumptions:
2537 * We must always pass up packets destined to the broadcast
2538 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2539 * broadcast filter enabled.
2540 *
2541 * Side Effects:
2542 * On-board adapter filters are updated.
2543 */
2544
2545 static int dfx_ctl_update_filters(DFX_board_t *bp)
2546 {
2547 int i = 0; /* used as index */
2548
2549 /* Fill in command request information */
2550
2551 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2552
2553 /* Initialize Broadcast filter - * ALWAYS ENABLED * */
2554
2555 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2556 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2557
2558 /* Initialize LLC Individual/Group Promiscuous filter */
2559
2560 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2561 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2562
2563 /* Initialize LLC Group Promiscuous filter */
2564
2565 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2566 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2567
2568 /* Terminate the item code list */
2569
2570 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2571
2572 /* Issue command to update adapter filters, then return */
2573
2574 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2575 return DFX_K_FAILURE;
2576 return DFX_K_SUCCESS;
2577 }
2578
2579
2580 /*
2581 * ======================
2582 * = dfx_hw_dma_cmd_req =
2583 * ======================
2584 *
2585 * Overview:
2586 * Sends PDQ DMA command to adapter firmware
2587 *
2588 * Returns:
2589 * Condition code
2590 *
2591 * Arguments:
2592 * bp - pointer to board information
2593 *
2594 * Functional Description:
2595 * The command request and response buffers are posted to the adapter in the manner
2596 * described in the PDQ Port Specification:
2597 *
2598 * 1. Command Response Buffer is posted to adapter.
2599 * 2. Command Request Buffer is posted to adapter.
2600 * 3. Command Request consumer index is polled until it indicates that request
2601 * buffer has been DMA'd to adapter.
2602 * 4. Command Response consumer index is polled until it indicates that response
2603 * buffer has been DMA'd from adapter.
2604 *
2605 * This ordering ensures that a response buffer is already available for the firmware
2606 * to use once it's done processing the request buffer.
2607 *
2608 * Return Codes:
2609 * DFX_K_SUCCESS - DMA command succeeded
2610 * DFX_K_OUTSTATE - Adapter is NOT in proper state
2611 * DFX_K_HW_TIMEOUT - DMA command timed out
2612 *
2613 * Assumptions:
2614 * Command request buffer has already been filled with desired DMA command.
2615 *
2616 * Side Effects:
2617 * None
2618 */
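/*
 * Typical caller pattern (a sketch; dfx_ctl_get_stats and
 * dfx_ctl_update_filters above follow this shape):
 *
 *	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
 *	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
 *		return ...;		(command did not complete)
 *	(read results from bp->cmd_rsp_virt)
 */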
2619
2620 static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2621 {
2622 int status; /* adapter status */
2623 int timeout_cnt; /* used in for loops */
2624
2625 /* Make sure the adapter is in a state that we can issue the DMA command in */
2626
2627 status = dfx_hw_adap_state_rd(bp);
2628 if ((status == PI_STATE_K_RESET) ||
2629 (status == PI_STATE_K_HALTED) ||
2630 (status == PI_STATE_K_DMA_UNAVAIL) ||
2631 (status == PI_STATE_K_UPGRADE))
2632 return DFX_K_OUTSTATE;
2633
2634 /* Put response buffer on the command response queue */
2635
2636 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2637 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2638 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2639
2640 /* Bump (and wrap) the producer index and write out to register */
2641
2642 bp->cmd_rsp_reg.index.prod += 1;
2643 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2644 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2645
2646 /* Put request buffer on the command request queue */
2647
2648 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2649 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2650 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2651
2652 /* Bump (and wrap) the producer index and write out to register */
2653
2654 bp->cmd_req_reg.index.prod += 1;
2655 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2656 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2657
2658 /*
2659 * Here we wait for the command request consumer index to be equal
2660 * to the producer, indicating that the adapter has DMAed the request.
2661 */
2662
2663 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2664 {
2665 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2666 break;
2667 udelay(100); /* wait for 100 microseconds */
2668 }
2669 if (timeout_cnt == 0)
2670 return DFX_K_HW_TIMEOUT;
2671
2672 /* Bump (and wrap) the completion index and write out to register */
2673
2674 bp->cmd_req_reg.index.comp += 1;
2675 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2676 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2677
2678 /*
2679 * Here we wait for the command response consumer index to be equal
2680 * to the producer, indicating that the adapter has DMAed the response.
2681 */
2682
2683 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2684 {
2685 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2686 break;
2687 udelay(100); /* wait for 100 microseconds */
2688 }
2689 if (timeout_cnt == 0)
2690 return DFX_K_HW_TIMEOUT;
2691
2692 /* Bump (and wrap) the completion index and write out to register */
2693
2694 bp->cmd_rsp_reg.index.comp += 1;
2695 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2696 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2697 return DFX_K_SUCCESS;
2698 }
2699
2700
2701 /*
2702 * ========================
2703 * = dfx_hw_port_ctrl_req =
2704 * ========================
2705 *
2706 * Overview:
2707 * Sends PDQ port control command to adapter firmware
2708 *
2709 * Returns:
2710 * Condition code; the Host Data register value is returned through host_data if that pointer is not NULL
2711 *
2712 * Arguments:
2713 * bp - pointer to board information
2714 * command - port control command
2715 * data_a - port data A register value
2716 * data_b - port data B register value
2717 * host_data - ptr to host data register value
2718 *
2719 * Functional Description:
2720 * Send generic port control command to adapter by writing
2721 * to various PDQ port registers, then polling for completion.
2722 *
2723 * Return Codes:
2724 * DFX_K_SUCCESS - port control command succeeded
2725 * DFX_K_HW_TIMEOUT - port control command timed out
2726 *
2727 * Assumptions:
2728 * None
2729 *
2730 * Side Effects:
2731 * None
2732 */
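/*
 * Example invocation (as issued from dfx_int_type_0_process above when
 * acknowledging a transmit flush):
 *
 *	(void) dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
 *				    0, 0, NULL);
 */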
2733
2734 static int dfx_hw_port_ctrl_req(
2735 DFX_board_t *bp,
2736 PI_UINT32 command,
2737 PI_UINT32 data_a,
2738 PI_UINT32 data_b,
2739 PI_UINT32 *host_data
2740 )
2741
2742 {
2743 PI_UINT32 port_cmd; /* Port Control command register value */
2744 int timeout_cnt; /* used in for loops */
2745
2746 /* Set Command Error bit in command longword */
2747
2748 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2749
2750 /* Issue port command to the adapter */
2751
2752 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2753 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2754 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2755
2756 /* Now wait for command to complete */
2757
2758 if (command == PI_PCTRL_M_BLAST_FLASH)
2759 timeout_cnt = 600000; /* set command timeout count to 60 seconds */
2760 else
2761 timeout_cnt = 20000; /* set command timeout count to 2 seconds */
2762
2763 for (; timeout_cnt > 0; timeout_cnt--)
2764 {
2765 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2766 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2767 break;
2768 udelay(100); /* wait for 100 microseconds */
2769 }
2770 if (timeout_cnt == 0)
2771 return DFX_K_HW_TIMEOUT;
2772
2773 /*
2774 * If the caller supplied a non-NULL host_data pointer, return the
2775 * contents of the HOST_DATA register through it.
2777 */
2778
2779 if (host_data != NULL)
2780 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2781 return DFX_K_SUCCESS;
2782 }
2783
2784
2785 /*
2786 * =====================
2787 * = dfx_hw_adap_reset =
2788 * =====================
2789 *
2790 * Overview:
2791 * Resets adapter
2792 *
2793 * Returns:
2794 * None
2795 *
2796 * Arguments:
2797 * bp - pointer to board information
2798 * type - type of reset to perform
2799 *
2800 * Functional Description:
2801 * Issue soft reset to adapter by writing to PDQ Port Reset
2802 * register. Use incoming reset type to tell adapter what
2803 * kind of reset operation to perform.
2804 *
2805 * Return Codes:
2806 * None
2807 *
2808 * Assumptions:
2809 * This routine merely issues a soft reset to the adapter.
2810 * It is expected that after this routine returns, the caller
2811 * will appropriately poll the Port Status register for the
2812 * adapter to enter the proper state.
2813 *
2814 * Side Effects:
2815 * Internal adapter registers are cleared.
2816 */
2817
2818 static void dfx_hw_adap_reset(
2819 DFX_board_t *bp,
2820 PI_UINT32 type
2821 )
2822
2823 {
2824 /* Set Reset type and assert reset */
2825
2826 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
2827 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2828
2829 /* Wait for at least 1 microsecond according to the spec; we wait 20 just to be safe */
2830
2831 udelay(20);
2832
2833 /* Deassert reset */
2834
2835 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2836 }
2837
2838
2839 /*
2840 * ========================
2841 * = dfx_hw_adap_state_rd =
2842 * ========================
2843 *
2844 * Overview:
2845 * Returns current adapter state
2846 *
2847 * Returns:
2848 * Adapter state per PDQ Port Specification
2849 *
2850 * Arguments:
2851 * bp - pointer to board information
2852 *
2853 * Functional Description:
2854 * Reads PDQ Port Status register and returns adapter state.
2855 *
2856 * Return Codes:
2857 * None
2858 *
2859 * Assumptions:
2860 * None
2861 *
2862 * Side Effects:
2863 * None
2864 */
2865
2866 static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2867 {
2868 PI_UINT32 port_status; /* Port Status register value */
2869
2870 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2871 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2872 }
2873
2874
2875 /*
2876 * =====================
2877 * = dfx_hw_dma_uninit =
2878 * =====================
2879 *
2880 * Overview:
2881 * Brings adapter to DMA_UNAVAILABLE state
2882 *
2883 * Returns:
2884 * Condition code
2885 *
2886 * Arguments:
2887 * bp - pointer to board information
2888 * type - type of reset to perform
2889 *
2890 * Functional Description:
2891 * Bring adapter to DMA_UNAVAILABLE state by performing the following:
2892 * 1. Set reset type bit in Port Data A Register then reset adapter.
2893 * 2. Check that adapter is in DMA_UNAVAILABLE state.
2894 *
2895 * Return Codes:
2896 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
2897 * DFX_K_HW_TIMEOUT - adapter did not reset properly
2898 *
2899 * Assumptions:
2900 * None
2901 *
2902 * Side Effects:
2903 * Internal adapter registers are cleared.
2904 */
2905
2906 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2907 {
2908 int timeout_cnt; /* used in for loops */
2909
2910 /* Set reset type bit and reset adapter */
2911
2912 dfx_hw_adap_reset(bp, type);
2913
2914 /* Now wait for adapter to enter DMA_UNAVAILABLE state */
2915
2916 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2917 {
2918 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2919 break;
2920 udelay(100); /* wait for 100 microseconds */
2921 }
2922 if (timeout_cnt == 0)
2923 return DFX_K_HW_TIMEOUT;
2924 return DFX_K_SUCCESS;
2925 }
2926
2927 /*
2928 * Align an sk_buff to a boundary power of 2
2929 *
2930 */
2931 #ifdef DYNAMIC_BUFFERS
2932 static void my_skb_align(struct sk_buff *skb, int n)
2933 {
2934 unsigned long x = (unsigned long)skb->data;
2935 unsigned long v;
2936
2937 v = ALIGN(x, n); /* Where we want to be */
2938
2939 skb_reserve(skb, v - x);
2940 }
2941 #endif
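/*
 * Worked example for my_skb_align() (illustrative numbers): if skb->data
 * sits at ...1084 (hex) and n is 128, ALIGN() rounds the address up to
 * ...1100 and skb_reserve() advances the data pointer by the 0x7c-byte
 * difference, leaving the buffer 128-byte aligned as the receive
 * descriptors require.
 */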
2942
2943 /*
2944 * ================
2945 * = dfx_rcv_init =
2946 * ================
2947 *
2948 * Overview:
2949 * Produces buffers to adapter LLC Host receive descriptor block
2950 *
2951 * Returns:
2952 * None
2953 *
2954 * Arguments:
2955 * bp - pointer to board information
2956 * get_buffers - non-zero if buffers to be allocated
2957 *
2958 * Functional Description:
2959 * This routine can be called during dfx_adap_init() or during an adapter
2960 * reset. It initializes the descriptor block and produces all allocated
2961 * LLC Host queue receive buffers.
2962 *
2963 * Return Codes:
2964 * Returns 0 on success or -ENOMEM if a buffer allocation fails (when using
2965 * dynamic buffer allocation). On failure, buffers that were already
2966 * allocated are not released; it is the caller's responsibility to free
2967 * them.
2968 *
2969 * Assumptions:
2970 * The PDQ has been reset and the adapter and driver maintained Type 2
2971 * register indices are cleared.
2972 *
2973 * Side Effects:
2974 * Receive buffers are posted to the adapter LLC queue and the adapter
2975 * is notified.
2976 */
2977
2978 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2979 {
2980 int i, j; /* used in for loop */
2981
2982 /*
2983 * Since each receive buffer is a single fragment of same length, initialize
2984 * first longword in each receive descriptor for entire LLC Host descriptor
2985 * block. Also initialize second longword in each receive descriptor with
2986 * physical address of receive buffer. We'll always allocate receive
2987 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2988 * block and produce new receive buffers by simply updating the receive
2989 * producer index.
2990 *
2991 * Assumptions:
2992 * To support all shipping versions of PDQ, the receive buffer size
2993 * must be mod 128 in length and the physical address must be 128 byte
2994 * aligned. In other words, bits 0-6 of the length and address must
2995 * be zero for the following descriptor field entries to be correct on
2996 * all PDQ-based boards. We guaranteed both requirements during
2997 * driver initialization when we allocated memory for the receive buffers.
2998 */
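/*
 * Posting pattern sketch (illustrative count): with, say, 32 buffers to
 * post and a 256-entry descriptor block, the nested loops below touch
 * entries i, i + 32, i + 64, ... for each buffer i.  Without
 * DYNAMIC_BUFFERS the same physical buffer i backs every one of those
 * entries, while with DYNAMIC_BUFFERS each entry gets its own freshly
 * allocated skb.
 */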
2999
3000 if (get_buffers) {
3001 #ifdef DYNAMIC_BUFFERS
3002 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3003 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3004 {
3005 struct sk_buff *newskb;
3006 dma_addr_t dma_addr;
3007
3008 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
3009 GFP_NOIO);
3010 if (!newskb)
3011 return -ENOMEM;
3012 /*
3013 * align to 128 bytes for compatibility with
3014 * the old EISA boards.
3015 */
3016
3017 my_skb_align(newskb, 128);
3018 dma_addr = dma_map_single(bp->bus_dev,
3019 newskb->data,
3020 PI_RCV_DATA_K_SIZE_MAX,
3021 DMA_FROM_DEVICE);
3022 if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3023 dev_kfree_skb(newskb);
3024 return -ENOMEM;
3025 }
3026 bp->descr_block_virt->rcv_data[i + j].long_0 =
3027 (u32)(PI_RCV_DESCR_M_SOP |
3028 ((PI_RCV_DATA_K_SIZE_MAX /
3029 PI_ALIGN_K_RCV_DATA_BUFF) <<
3030 PI_RCV_DESCR_V_SEG_LEN));
3031 bp->descr_block_virt->rcv_data[i + j].long_1 =
3032 (u32)dma_addr;
3033
3034 /*
3035 * p_rcv_buff_va is only used inside the
3036 * kernel so we put the skb pointer here.
3037 */
3038 bp->p_rcv_buff_va[i+j] = (char *) newskb;
3039 }
3040 #else
3041 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
3042 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3043 {
3044 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
3045 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
3046 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
3047 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
3048 }
3049 #endif
3050 }
3051
3052 /* Update receive producer and Type 2 register */
3053
3054 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
3055 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3056 return 0;
3057 }
3058
3059
3060 /*
3061 * =========================
3062 * = dfx_rcv_queue_process =
3063 * =========================
3064 *
3065 * Overview:
3066 * Process received LLC frames.
3067 *
3068 * Returns:
3069 * None
3070 *
3071 * Arguments:
3072 * bp - pointer to board information
3073 *
3074 * Functional Description:
3075 * Received LLC frames are processed until there are no more consumed frames.
3076 * Once all frames are processed, the receive buffers are returned to the
3077 * adapter. Note that this algorithm bounds the time spent in this routine:
3078 * there is a fixed number of receive buffers to process, and new buffers are
3079 * not made visible to the adapter until this routine exits and the ISR writes
3080 * the Type 2 Producer register.
3081 *
3082 * Return Codes:
3083 * None
3084 *
3085 * Assumptions:
3086 * None
3087 *
3088 * Side Effects:
3089 * None
3090 */
3091
3092 static void dfx_rcv_queue_process(
3093 DFX_board_t *bp
3094 )
3095
3096 {
3097 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3098 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
3099 u32 descr, pkt_len; /* FMC descriptor field and packet length */
3100 struct sk_buff *skb = NULL; /* pointer to a sk_buff to hold incoming packet data */
3101
3102 /* Service all consumed LLC receive frames */
3103
3104 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3105 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3106 {
3107 /* Process any errors */
3108 dma_addr_t dma_addr;
3109 int entry;
3110
3111 entry = bp->rcv_xmt_reg.index.rcv_comp;
3112 #ifdef DYNAMIC_BUFFERS
3113 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3114 #else
3115 p_buff = bp->p_rcv_buff_va[entry];
3116 #endif
3117 dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
3118 dma_sync_single_for_cpu(bp->bus_dev,
3119 dma_addr + RCV_BUFF_K_DESCR,
3120 sizeof(u32),
3121 DMA_FROM_DEVICE);
3122 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3123
3124 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3125 {
3126 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3127 bp->rcv_crc_errors++;
3128 else
3129 bp->rcv_frame_status_errors++;
3130 }
3131 else
3132 {
3133 int rx_in_place = 0;
3134
3135 /* The frame was received without errors - verify packet length */
3136
3137 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3138 pkt_len -= 4; /* subtract 4 byte CRC */
3139 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3140 bp->rcv_length_errors++;
3141 else{
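/*
 * Receive copybreak strategy (DYNAMIC_BUFFERS): for frames larger
 * than SKBUFF_RX_COPYBREAK, try to allocate and map a replacement
 * skb so the original buffer can be passed up in place; if the
 * frame is small or the replacement cannot be set up, fall back
 * to copying the data into a freshly allocated skb below.
 */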
3142 #ifdef DYNAMIC_BUFFERS
3143 struct sk_buff *newskb = NULL;
3144
3145 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3146 dma_addr_t new_dma_addr;
3147
3148 newskb = netdev_alloc_skb(bp->dev,
3149 NEW_SKB_SIZE);
3150 if (newskb){
3151 my_skb_align(newskb, 128);
3152 new_dma_addr = dma_map_single(
3153 bp->bus_dev,
3154 newskb->data,
3155 PI_RCV_DATA_K_SIZE_MAX,
3156 DMA_FROM_DEVICE);
3157 if (dma_mapping_error(
3158 bp->bus_dev,
3159 new_dma_addr)) {
3160 dev_kfree_skb(newskb);
3161 newskb = NULL;
3162 }
3163 }
3164 if (newskb) {
3165 rx_in_place = 1;
3166
3167 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3168 dma_unmap_single(bp->bus_dev,
3169 dma_addr,
3170 PI_RCV_DATA_K_SIZE_MAX,
3171 DMA_FROM_DEVICE);
3172 skb_reserve(skb, RCV_BUFF_K_PADDING);
3173 bp->p_rcv_buff_va[entry] = (char *)newskb;
3174 bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
3175 }
3176 }
3177 if (!newskb)
3178 #endif
3179 /* Alloc new buffer to pass up,
3180 * add room for PRH. */
3181 skb = netdev_alloc_skb(bp->dev,
3182 pkt_len + 3);
3183 if (skb == NULL)
3184 {
3185 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3186 bp->rcv_discards++;
3187 break;
3188 }
3189 else {
3190 if (!rx_in_place) {
3191 /* Receive buffer allocated, pass receive packet up */
3192 dma_sync_single_for_cpu(
3193 bp->bus_dev,
3194 dma_addr +
3195 RCV_BUFF_K_PADDING,
3196 pkt_len + 3,
3197 DMA_FROM_DEVICE);
3198
3199 skb_copy_to_linear_data(skb,
3200 p_buff + RCV_BUFF_K_PADDING,
3201 pkt_len + 3);
3202 }
3203
3204 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
3205 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
3206 skb->protocol = fddi_type_trans(skb, bp->dev);
3207 bp->rcv_total_bytes += skb->len;
3208 netif_rx(skb);
3209
3210 /* Update the rcv counters */
3211 bp->rcv_total_frames++;
3212 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3213 bp->rcv_multicast_frames++;
3214 }
3215 }
3216 }
3217
3218 /*
3219 * Advance the producer (for recycling) and advance the completion
3220 * (for servicing received frames). Note that it is okay to
3221 * advance the producer without checking that it passes the
3222 * completion index because they are both advanced at the same
3223 * rate.
3224 */
3225
3226 bp->rcv_xmt_reg.index.rcv_prod += 1;
3227 bp->rcv_xmt_reg.index.rcv_comp += 1;
3228 }
3229 }
3230
3231
3232 /*
3233 * =====================
3234 * = dfx_xmt_queue_pkt =
3235 * =====================
3236 *
3237 * Overview:
3238 * Queues packets for transmission
3239 *
3240 * Returns:
3241 * Condition code
3242 *
3243 * Arguments:
3244 * skb - pointer to sk_buff to queue for transmission
3245 * dev - pointer to device information
3246 *
3247 * Functional Description:
3248 * Here we assume that an incoming skb transmit request
3249 * is contained in a single physically contiguous buffer
3250 * in which the virtual address of the start of packet
3251 * (skb->data) can be converted to a physical address
3252 * by using dma_map_single().
3253 *
3254 * Since the adapter architecture requires a three byte
3255 * packet request header to prepend the start of packet,
3256 * we'll write the three byte field immediately prior to
3257 * the FC byte. This assumption is valid because we've
3258 * ensured that dev->hard_header_len includes three pad
3259 * bytes. By posting a single fragment to the adapter,
3260 * we'll reduce the number of descriptor fetches and
3261 * bus traffic needed to send the request.
3262 *
3263 * Also, we can't free the skb until after it's been DMA'd
3264 * out by the adapter, so we'll queue it in the driver and
3265 * return it in dfx_xmt_done.
3266 *
3267 * Return Codes:
3268 * NETDEV_TX_OK - driver queued or dropped the packet (link unavailable or bad skbuff)
3269 * NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3270 *
3271 * Assumptions:
3272 * First and foremost, we assume the incoming skb pointer
3273 * is NOT NULL and is pointing to a valid sk_buff structure.
3274 *
3275 * The outgoing packet is complete, starting with the
3276 * frame control byte including the last byte of data,
3277 * but NOT including the 4 byte CRC. We'll let the
3278 * adapter hardware generate and append the CRC.
3279 *
3280 * The entire packet is stored in one physically
3281 * contiguous buffer which is not cached and whose
3282 * 32-bit physical address can be determined.
3283 *
3284 * It's vital that this routine is NOT reentered for the
3285 * same board and that the OS is not in another section of
3286 * code (eg. dfx_int_common) for the same board on a
3287 * different thread.
3288 *
3289 * Side Effects:
3290 * None
3291 */
3292
3293 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3294 struct net_device *dev)
3295 {
3296 DFX_board_t *bp = netdev_priv(dev);
3297 u8 prod; /* local transmit producer index */
3298 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3299 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3300 dma_addr_t dma_addr;
3301 unsigned long flags;
3302
3303 netif_stop_queue(dev);
3304
3305 /*
3306 * Verify that incoming transmit request is OK
3307 *
3308 * Note: The packet size check is consistent with other
3309 * Linux device drivers, although the correct packet
3310 * size should be verified before calling the
3311 * transmit routine.
3312 */
3313
3314 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3315 {
3316 printk("%s: Invalid packet length - %u bytes\n",
3317 dev->name, skb->len);
3318 bp->xmt_length_errors++; /* bump error counter */
3319 netif_wake_queue(dev);
3320 dev_kfree_skb(skb);
3321 return NETDEV_TX_OK; /* return "success" */
3322 }
3323 /*
3324 * See if adapter link is available, if not, free buffer
3325 *
3326 * Note: If the link isn't available, free the buffer and return
3327 * NETDEV_TX_OK rather than tell the upper layer to requeue
3328 * the packet. The reasoning is that by the time the link
3329 * becomes available, the packet to be sent will be
3330 * fairly stale. By simply dropping the packet, the
3331 * higher layer protocols will eventually time out
3332 * waiting for response packets that they won't receive.
3333 */
3334
3335 if (bp->link_available == PI_K_FALSE)
3336 {
3337 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
3338 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
3339 else
3340 {
3341 bp->xmt_discards++; /* bump error counter */
3342 dev_kfree_skb(skb); /* free sk_buff now */
3343 netif_wake_queue(dev);
3344 return NETDEV_TX_OK; /* return "success" */
3345 }
3346 }
3347
3348 /* Write the three PRH bytes immediately before the FC byte */
3349
3350 skb_push(skb, 3);
3351 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
3352 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
3353 skb->data[2] = DFX_PRH2_BYTE; /* specification */
3354
3355 dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
3356 DMA_TO_DEVICE);
3357 if (dma_mapping_error(bp->bus_dev, dma_addr)) {
3358 skb_pull(skb, 3);
3359 return NETDEV_TX_BUSY;
3360 }
3361
3362 spin_lock_irqsave(&bp->lock, flags);
3363
3364 /* Get the current producer and the next free xmt data descriptor */
3365
3366 prod = bp->rcv_xmt_reg.index.xmt_prod;
3367 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3368
3369 /*
3370 * Get pointer to auxiliary queue entry to contain information
3371 * for this packet.
3372 *
3373 * Note: The current xmt producer index will become the
3374 * current xmt completion index when we complete this
3375 * packet later on. So, we'll get the pointer to the
3376 * next auxiliary queue entry now before we bump the
3377 * producer index.
3378 */
3379
3380 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
3381
3382 /*
3383 * Write the descriptor with buffer info and bump producer
3384 *
3385 * Note: Since we need to start DMA from the packet request
3386 * header, we'll add 3 bytes to the DMA buffer length,
3387 * and we'll determine the physical address of the
3388 * buffer from the PRH, not skb->data.
3389 *
3390 * Assumptions:
3391 * 1. Packet starts with the frame control (FC) byte
3392 * at skb->data.
3393 * 2. The 4-byte CRC is not appended to the buffer or
3394 * included in the length.
3395 * 3. Packet length (skb->len) is from FC to end of
3396 * data, inclusive.
3397 * 4. The packet length does not exceed the maximum
3398 * FDDI LLC frame length of 4491 bytes.
3399 * 5. The entire packet is contained in a physically
3400 * contiguous, non-cached, locked memory space
3401 * comprised of a single buffer pointed to by
3402 * skb->data.
3403 * 6. The DMA (bus) address of the start of packet
3404 * can be obtained from the virtual address
3405 * by using dma_map_single() and fits within
3406 * 32 bits.
3407 */
3408
3409 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3410 p_xmt_descr->long_1 = (u32)dma_addr;
3411
3412 /*
3413 * Verify that descriptor is actually available
3414 *
3415 * Note: If the descriptor isn't available, return NETDEV_TX_BUSY,
3416 * which tells the upper layer to requeue the packet for
3417 * later transmission.
3418 *
3419 * We need to ensure that the producer never reaches the
3420 * completion, except to indicate that the queue is empty.
3421 */
3422
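/*
 * One descriptor entry is always left unused so that "producer equals
 * completion" unambiguously means the ring is empty; the transmit ring
 * can therefore hold at most one frame less than the number of
 * transmit descriptors.
 */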
3423 if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3424 {
3425 skb_pull(skb,3);
3426 spin_unlock_irqrestore(&bp->lock, flags);
3427 return NETDEV_TX_BUSY; /* requeue packet for later */
3428 }
3429
3430 /*
3431 * Save info for this packet for xmt done indication routine
3432 *
3433 * Normally, we'd save the producer index in the p_xmt_drv_descr
3434 * structure so that we'd have it handy when we complete this
3435 * packet later (in dfx_xmt_done). However, since the current
3436 * transmit architecture guarantees a single fragment for the
3437 * entire packet, we can simply bump the completion index by
3438 * one (1) for each completed packet.
3439 *
3440 * Note: If this assumption changes and we're presented with
3441 * an inconsistent number of transmit fragments for packet
3442 * data, we'll need to modify this code to save the current
3443 * transmit producer index.
3444 */
3445
3446 p_xmt_drv_descr->p_skb = skb;
3447
3448 /* Update Type 2 register */
3449
3450 bp->rcv_xmt_reg.index.xmt_prod = prod;
3451 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3452 spin_unlock_irqrestore(&bp->lock, flags);
3453 netif_wake_queue(dev);
3454 return NETDEV_TX_OK; /* packet queued to adapter */
3455 }
3456
3457
3458 /*
3459 * ================
3460 * = dfx_xmt_done =
3461 * ================
3462 *
3463 * Overview:
3464 * Processes all frames that have been transmitted.
3465 *
3466 * Returns:
3467 * Number of completed transmit frames whose buffers were freed
3468 *
3469 * Arguments:
3470 * bp - pointer to board information
3471 *
3472 * Functional Description:
3473 * For all consumed transmit descriptors that have not
3474 * yet been completed, we'll free the skb we were holding
3475 * onto using dev_kfree_skb and bump the appropriate
3476 * counters.
3477 *
3478 * Return Codes:
3479 * None
3480 *
3481 * Assumptions:
3482 * The Type 2 register is not updated in this routine. It is
3483 * assumed that it will be updated in the ISR when dfx_xmt_done
3484 * returns.
3485 *
3486 * Side Effects:
3487 * None
3488 */
3489
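/*
 * Hypothetical caller sketch (not part of the driver): dfx_xmt_done()
 * only advances the driver's completion index and frees the queued
 * skb's; per the assumption above, the caller is expected to write the
 * updated rcv/xmt indices back to the adapter's Type 2 producer
 * register afterwards, for example:
 */
#if 0
	if (dfx_xmt_done(bp))
		dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD,
				    bp->rcv_xmt_reg.lword);
#endif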
3490 static int dfx_xmt_done(DFX_board_t *bp)
3491 {
3492 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3493 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3494 u8 comp; /* local transmit completion index */
3495 int freed = 0; /* buffers freed */
3496
3497 /* Service all consumed transmit frames */
3498
3499 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3500 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3501 {
3502 /* Get pointer to the transmit driver descriptor block information */
3503
3504 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3505
3506 /* Increment transmit counters */
3507
3508 bp->xmt_total_frames++;
3509 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3510
3511 /* Return skb to operating system */
3512 comp = bp->rcv_xmt_reg.index.xmt_comp;
3513 dma_unmap_single(bp->bus_dev,
3514 bp->descr_block_virt->xmt_data[comp].long_1,
3515 p_xmt_drv_descr->p_skb->len,
3516 DMA_TO_DEVICE);
3517 dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3518
3519 /*
3520 * Move to start of next packet by updating completion index
3521 *
3522 * Here we assume that a transmit packet request is always
3523 * serviced by posting one fragment. We can therefore
3524 * simplify the completion code by incrementing the
3525 * completion index by one. This code will need to be
3526 * modified if this assumption changes. See comments
3527 * in dfx_xmt_queue_pkt for more details.
3528 */
3529
3530 bp->rcv_xmt_reg.index.xmt_comp += 1;
3531 freed++;
3532 }
3533 return freed;
3534 }
3535
3536
3537 /*
3538 * =================
3539 * = dfx_rcv_flush =
3540 * =================
3541 *
3542 * Overview:
3543 * Remove all skb's in the receive ring.
3544 *
3545 * Returns:
3546 * None
3547 *
3548 * Arguments:
3549 * bp - pointer to board information
3550 *
3551 * Functional Description:
3552 * Frees all of the dynamically allocated skb's that are
3553 * currently attached to the device receive ring. This
3554 * function is typically only used when the device is
3555 * initialized or reinitialized.
3556 *
3557 * Return Codes:
3558 * None
3559 *
3560 * Side Effects:
3561 * None
3562 */
3563 #ifdef DYNAMIC_BUFFERS
3564 static void dfx_rcv_flush( DFX_board_t *bp )
3565 {
3566 int i, j;
3567
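/*
 * Together these two loops visit every slot of the receive descriptor
 * ring exactly once: 'i' selects a buffer within a posted group and
 * 'j' steps from group to group in strides of rcv_bufs_to_post.
 */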
3568 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3569 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3570 {
3571 struct sk_buff *skb;
3572 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3573 if (skb) {
3574 dma_unmap_single(bp->bus_dev,
3575 bp->descr_block_virt->rcv_data[i+j].long_1,
3576 PI_RCV_DATA_K_SIZE_MAX,
3577 DMA_FROM_DEVICE);
3578 dev_kfree_skb(skb);
3579 }
3580 bp->p_rcv_buff_va[i+j] = NULL;
3581 }
3582
3583 }
3584 #endif /* DYNAMIC_BUFFERS */
3585
3586 /*
3587 * =================
3588 * = dfx_xmt_flush =
3589 * =================
3590 *
3591 * Overview:
3592 * Processes all frames whether they've been transmitted
3593 * or not.
3594 *
3595 * Returns:
3596 * None
3597 *
3598 * Arguments:
3599 * bp - pointer to board information
3600 *
3601 * Functional Description:
3602 * For all produced transmit descriptors that have not
3603 * yet been completed, we'll free the skb we were holding
3604 * onto using dev_kfree_skb and bump the appropriate
3605 * counters. Of course, it's possible that some of
3606 * these transmit requests actually did go out, but we
3607 * won't make that distinction here. Finally, we'll
3608 * update the consumer index to match the producer.
3609 *
3610 * Return Codes:
3611 * None
3612 *
3613 * Assumptions:
3614 * This routine does NOT update the Type 2 register. It
3615 * is assumed that this routine is being called during a
3616 * transmit flush interrupt, or a shutdown or close routine.
3617 *
3618 * Side Effects:
3619 * None
3620 */
3621
3622 static void dfx_xmt_flush( DFX_board_t *bp )
3623 {
3624 u32 prod_cons; /* rcv/xmt consumer block longword */
3625 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3626 u8 comp; /* local transmit completion index */
3627
3628 /* Flush all outstanding transmit frames */
3629
3630 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3631 {
3632 /* Get pointer to the transmit driver descriptor block information */
3633
3634 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3635
3636 /* Return skb to operating system */
3637 comp = bp->rcv_xmt_reg.index.xmt_comp;
3638 dma_unmap_single(bp->bus_dev,
3639 bp->descr_block_virt->xmt_data[comp].long_1,
3640 p_xmt_drv_descr->p_skb->len,
3641 DMA_TO_DEVICE);
3642 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3643
3644 /* Increment transmit error counter */
3645
3646 bp->xmt_discards++;
3647
3648 /*
3649 * Move to start of next packet by updating completion index
3650 *
3651 * Here we assume that a transmit packet request is always
3652 * serviced by posting one fragment. We can therefore
3653 * simplify the completion code by incrementing the
3654 * completion index by one. This code will need to be
3655 * modified if this assumption changes. See comments
3656 * in dfx_xmt_queue_pkt for more details.
3657 */
3658
3659 bp->rcv_xmt_reg.index.xmt_comp += 1;
3660 }
3661
3662 /* Update the transmit consumer index in the consumer block */
3663
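/*
 * The xmt index field of the shared consumer longword is cleared and
 * then overwritten with the current producer index, so that the
 * transmit queue subsequently reads back as empty.
 */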
3664 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3665 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3666 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3667 }
3668
3669 /*
3670 * ==================
3671 * = dfx_unregister =
3672 * ==================
3673 *
3674 * Overview:
3675 * Shuts down an FDDI controller
3676 *
3677 * Returns:
3678 * None
3679 *
3680 * Arguments:
3681 * bdev - pointer to device information
3682 *
3683 * Functional Description:
3684 * Unregisters the net device and frees all adapter resources.
3685 * Return Codes:
3686 * None
3687 *
3688 * Assumptions:
3689 * It compiles so it should work :-( (PCI cards do :-)
3690 *
3691 * Side Effects:
3692 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3693 * freed.
3694 */
3695 static void dfx_unregister(struct device *bdev)
3696 {
3697 struct net_device *dev = dev_get_drvdata(bdev);
3698 DFX_board_t *bp = netdev_priv(dev);
3699 int dfx_bus_pci = dev_is_pci(bdev);
3700 resource_size_t bar_start[3] = {0}; /* pointers to ports */
3701 resource_size_t bar_len[3] = {0}; /* resource lengths */
3702 int alloc_size; /* total buffer size used */
3703
3704 unregister_netdev(dev);
3705
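/*
 * This size computation must mirror the one used when the shared
 * descriptor/consumer memory was originally allocated, since the whole
 * area is freed with a single dma_free_coherent() call.
 */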
3706 alloc_size = sizeof(PI_DESCR_BLOCK) +
3707 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3708 #ifndef DYNAMIC_BUFFERS
3709 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3710 #endif
3711 sizeof(PI_CONSUMER_BLOCK) +
3712 (PI_ALIGN_K_DESC_BLK - 1);
3713 if (bp->kmalloced)
3714 dma_free_coherent(bdev, alloc_size,
3715 bp->kmalloced, bp->kmalloced_dma);
3716
3717 dfx_bus_uninit(dev);
3718
3719 dfx_get_bars(bp, bar_start, bar_len);
3720 if (bar_start[2] != 0)
3721 release_region(bar_start[2], bar_len[2]);
3722 if (bar_start[1] != 0)
3723 release_region(bar_start[1], bar_len[1]);
3724 if (dfx_use_mmio) {
3725 iounmap(bp->base.mem);
3726 release_mem_region(bar_start[0], bar_len[0]);
3727 } else
3728 release_region(bar_start[0], bar_len[0]);
3729
3730 if (dfx_bus_pci)
3731 pci_disable_device(to_pci_dev(bdev));
3732
3733 free_netdev(dev);
3734 }
3735
3736
3737 static int __maybe_unused dfx_dev_register(struct device *);
3738 static int __maybe_unused dfx_dev_unregister(struct device *);
3739
3740 #ifdef CONFIG_PCI
3741 static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3742 static void dfx_pci_unregister(struct pci_dev *);
3743
3744 static const struct pci_device_id dfx_pci_table[] = {
3745 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3746 { }
3747 };
3748 MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3749
3750 static struct pci_driver dfx_pci_driver = {
3751 .name = DRV_NAME,
3752 .id_table = dfx_pci_table,
3753 .probe = dfx_pci_register,
3754 .remove = dfx_pci_unregister,
3755 };
3756
3757 static int dfx_pci_register(struct pci_dev *pdev,
3758 const struct pci_device_id *ent)
3759 {
3760 return dfx_register(&pdev->dev);
3761 }
3762
3763 static void dfx_pci_unregister(struct pci_dev *pdev)
3764 {
3765 dfx_unregister(&pdev->dev);
3766 }
3767 #endif /* CONFIG_PCI */
3768
3769 #ifdef CONFIG_EISA
3770 static const struct eisa_device_id dfx_eisa_table[] = {
3771 { "DEC3001", DEFEA_PROD_ID_1 },
3772 { "DEC3002", DEFEA_PROD_ID_2 },
3773 { "DEC3003", DEFEA_PROD_ID_3 },
3774 { "DEC3004", DEFEA_PROD_ID_4 },
3775 { }
3776 };
3777 MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3778
3779 static struct eisa_driver dfx_eisa_driver = {
3780 .id_table = dfx_eisa_table,
3781 .driver = {
3782 .name = DRV_NAME,
3783 .bus = &eisa_bus_type,
3784 .probe = dfx_dev_register,
3785 .remove = dfx_dev_unregister,
3786 },
3787 };
3788 #endif /* CONFIG_EISA */
3789
3790 #ifdef CONFIG_TC
3791 static struct tc_device_id const dfx_tc_table[] = {
3792 { "DEC ", "PMAF-FA " },
3793 { "DEC ", "PMAF-FD " },
3794 { "DEC ", "PMAF-FS " },
3795 { "DEC ", "PMAF-FU " },
3796 { }
3797 };
3798 MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3799
3800 static struct tc_driver dfx_tc_driver = {
3801 .id_table = dfx_tc_table,
3802 .driver = {
3803 .name = DRV_NAME,
3804 .bus = &tc_bus_type,
3805 .probe = dfx_dev_register,
3806 .remove = dfx_dev_unregister,
3807 },
3808 };
3809 #endif /* CONFIG_TC */
3810
3811 static int __maybe_unused dfx_dev_register(struct device *dev)
3812 {
3813 int status;
3814
3815 status = dfx_register(dev);
3816 if (!status)
3817 get_device(dev);
3818 return status;
3819 }
3820
3821 static int __maybe_unused dfx_dev_unregister(struct device *dev)
3822 {
3823 put_device(dev);
3824 dfx_unregister(dev);
3825 return 0;
3826 }
3827
3828
3829 static int dfx_init(void)
3830 {
3831 int status;
3832
3833 status = pci_register_driver(&dfx_pci_driver);
3834 if (status)
3835 goto err_pci_register;
3836
3837 status = eisa_driver_register(&dfx_eisa_driver);
3838 if (status)
3839 goto err_eisa_register;
3840
3841 status = tc_register_driver(&dfx_tc_driver);
3842 if (status)
3843 goto err_tc_register;
3844
3845 return 0;
3846
3847 err_tc_register:
3848 eisa_driver_unregister(&dfx_eisa_driver);
3849 err_eisa_register:
3850 pci_unregister_driver(&dfx_pci_driver);
3851 err_pci_register:
3852 return status;
3853 }
3854
3855 static void dfx_cleanup(void)
3856 {
3857 tc_unregister_driver(&dfx_tc_driver);
3858 eisa_driver_unregister(&dfx_eisa_driver);
3859 pci_unregister_driver(&dfx_pci_driver);
3860 }
3861
3862 module_init(dfx_init);
3863 module_exit(dfx_cleanup);
3864 MODULE_AUTHOR("Lawrence V. Stefani");
3865 MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3866 DRV_VERSION " " DRV_RELDATE);
3867 MODULE_LICENSE("GPL");
3868