/*
 * File Name:
 *   defxx.c
 *
 * Copyright Information:
 *   Copyright Digital Equipment Corporation 1996.
 *
 *   This software may be used and distributed according to the terms of
 *   the GNU General Public License, incorporated herein by reference.
 *
 * Abstract:
 *   A Linux device driver supporting the Digital Equipment Corporation
 *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
 *   adapters include:
 *
 *		DEC FDDIcontroller/TURBOchannel (DEFTA)
 *		DEC FDDIcontroller/EISA         (DEFEA)
 *		DEC FDDIcontroller/PCI          (DEFPA)
 *
 * The original author:
 *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
 *
 * Maintainers:
 *   macro	Maciej W. Rozycki <macro@linux-mips.org>
 *
 * Credits:
 *   I'd like to thank Patricia Cross for helping me get started with
 *   Linux, David Davies for a lot of help upgrading and configuring
 *   my development system and for answering many OS and driver
 *   development questions, and Alan Cox for recommendations and
 *   integration help on getting FDDI support into Linux.  LVS
 *
 * Driver Architecture:
 *   The driver architecture is largely based on previous driver work
 *   for other operating systems.  The upper edge interface and
 *   functions were largely taken from existing Linux device drivers
 *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
 *   driver.
 *
 *   Adapter Probe -
 *	The driver scans for supported EISA adapters by reading the
 *	SLOT ID register for each EISA slot and making a match
 *	against the expected value.
 *
 *   Bus-Specific Initialization -
 *	This driver currently supports both EISA and PCI controller
 *	families.  While the custom DMA chip and FDDI logic is similar
 *	or identical, the bus logic is very different.  After
 *	initialization, the only bus-specific difference is in how the
 *	driver enables and disables interrupts.  Other than that, the
 *	run-time critical code behaves the same on both families.
 *	It's important to note that both adapter families are configured
 *	to I/O map, rather than memory map, the adapter registers.
 *
 *   Driver Open/Close -
 *	In the driver open routine, the driver ISR (interrupt service
 *	routine) is registered and the adapter is brought to an
 *	operational state.  In the driver close routine, the opposite
 *	occurs; the driver ISR is deregistered and the adapter is
 *	brought to a safe, but closed state.  Users may use consecutive
 *	commands to bring the adapter up and down as in the following
 *	example:
 *		ifconfig fddi0 up
 *		ifconfig fddi0 down
 *		ifconfig fddi0 up
 *
 *   Driver Shutdown -
 *	Apparently, there is no shutdown or halt routine support under
 *	Linux.  This routine would be called during "reboot" or
 *	"shutdown" to allow the driver to place the adapter in a safe
 *	state before a warm reboot occurs.  To be really safe, the user
 *	should close the adapter before shutdown (eg. ifconfig fddi0 down)
 *	to ensure that the adapter DMA engine is taken off-line.  However,
 *	the current driver code anticipates this problem and always issues
 *	a soft reset of the adapter at the beginning of driver initialization.
 *	A future driver enhancement in this area may occur in 2.1.X where
 *	Alan indicated that a shutdown handler may be implemented.
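 *
 *	For illustration only (not part of this driver): if such a shutdown
 *	hook were added, it would most likely mirror the first steps of the
 *	close path -- disable PDQ interrupts and soft-reset the adapter so
 *	that the DMA engine is quiesced before the reboot.  A minimal sketch
 *	using this file's own helpers (the hook name is hypothetical):
 *
 *		static void dfx_shutdown_sketch(struct net_device *dev)
 *		{
 *			DFX_board_t *bp = netdev_priv(dev);
 *
 *			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
 *					    PI_HOST_INT_K_DISABLE_ALL_INTS);
 *			(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
 *		}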
 *
 *   Interrupt Service Routine -
 *	The driver supports shared interrupts, so the ISR is registered for
 *	each board with the appropriate flag and the pointer to that board's
 *	device structure.  This provides the context during interrupt
 *	processing to support shared interrupts and multiple boards.
 *
 *	Interrupt enabling/disabling can occur at many levels.  At the host
 *	end, you can disable system interrupts, or disable interrupts at the
 *	PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
 *	have a bus-logic chip interrupt enable/disable as well as a DMA
 *	controller interrupt enable/disable.
 *
 *	The driver currently enables and disables adapter interrupts at the
 *	bus-logic chip and assumes that Linux will take care of clearing or
 *	acknowledging any host-based interrupt chips.
 *
 *   Control Functions -
 *	Control functions are those used to support functions such as adding
 *	or deleting multicast addresses, enabling or disabling packet
 *	reception filters, or other custom/proprietary commands.  Presently,
 *	the driver supports the "get statistics", "set multicast list", and
 *	"set mac address" functions defined by Linux.  A list of possible
 *	enhancements includes:
 *
 *		- Custom ioctl interface for executing port interface commands
 *		- Custom ioctl interface for adding unicast addresses to
 *		  adapter CAM (to support bridge functions).
 *		- Custom ioctl interface for supporting firmware upgrades.
 *
 *   Hardware (port interface) Support Routines -
 *	The driver function names that start with "dfx_hw_" represent
 *	low-level port interface routines that are called frequently.  They
 *	include issuing a DMA or port control command to the adapter,
 *	resetting the adapter, or reading the adapter state.  Since the
 *	driver initialization and run-time code must make calls into the
 *	port interface, these routines were written to be as generic and
 *	usable as possible.
 *
 *   Receive Path -
 *	The adapter DMA engine supports a 256 entry receive descriptor block
 *	of which up to 255 entries can be used at any given time.  The
 *	architecture is a standard producer, consumer, completion model in
 *	which the driver "produces" receive buffers to the adapter, the
 *	adapter "consumes" the receive buffers by DMAing incoming packet data,
 *	and the driver "completes" the receive buffers by servicing the
 *	incoming packet, then "produces" a new buffer and starts the cycle
 *	again.  Receive buffers can be fragmented into up to 16 fragments
 *	(descriptor entries).  For simplicity, this driver posts
 *	single-fragment receive buffers of 4608 bytes, then allocates a
 *	sk_buff, copies the data, then reposts the buffer.  To reduce CPU
 *	utilization, a better approach would be to pass up the receive
 *	buffer (no extra copy) then allocate and post a replacement buffer.
 *	This is a performance enhancement that should be looked into at
 *	some point.
 *
 *   Transmit Path -
 *	Like the receive path, the adapter DMA engine supports a 256 entry
 *	transmit descriptor block of which up to 255 entries can be used at
 *	any given time.  Transmit buffers can be fragmented into up to 255
 *	fragments (descriptor entries).  This driver always posts one
 *	fragment per transmit packet request.
 *
 *	The fragment contains the entire packet from FC to end of data.
 *	Before posting the buffer to the adapter, the driver sets a three-byte
 *	packet request header (PRH) which is required by the Motorola MAC chip
 *	used on the adapters.  The PRH tells the MAC the type of token to
 *	receive/send, whether or not to generate and append the CRC, whether
 *	synchronous or asynchronous framing is used, etc.  Since the PRH
 *	definition is not necessarily consistent across all FDDI chipsets,
 *	the driver, rather than the common FDDI packet handler routines,
 *	sets these bytes.
 *
 *	To reduce the amount of descriptor fetches needed per transmit request,
 *	the driver takes advantage of the fact that there are at least three
 *	bytes available before the skb->data field on the outgoing transmit
 *	request.  This is guaranteed by having fddi_setup() in net_init.c set
 *	dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
 *	header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
 *	bytes which we'll use to store the PRH.
 *
 *	There's a subtle advantage to adding these pad bytes to the
 *	hard_header_len: it ensures that the data portion of the packet for
 *	an 802.2 SNAP frame is longword aligned.  Other FDDI driver
 *	implementations may not need the extra padding and can start copying
 *	or DMAing directly from the FC byte which starts at skb->data.  Should
 *	another driver implementation need ADDITIONAL padding, the net_init.c
 *	module should be updated and dev->hard_header_len should be increased.
 *	NOTE: To maintain the alignment on the data portion of the packet,
 *	dev->hard_header_len should always be evenly divisible by 4 and at
 *	least 24 bytes in size.
 *
 * Modification History:
 *		Date		Name	Description
 *		16-Aug-96	LVS	Created.
 *		20-Aug-96	LVS	Updated dfx_probe so that version information
 *					string is only displayed if 1 or more cards are
 *					found.  Changed dfx_rcv_queue_process to copy
 *					3 NULL bytes before FC to ensure that data is
 *					longword aligned in receive buffer.
 *		09-Sep-96	LVS	Updated dfx_ctl_set_multicast_list to enable
 *					LLC group promiscuous mode if multicast list
 *					is too large.  LLC individual/group promiscuous
 *					mode is now disabled if IFF_PROMISC flag not set.
 *					dfx_xmt_queue_pkt no longer checks for NULL skb
 *					on Alan Cox recommendation.  Added node address
 *					override support.
 *		12-Sep-96	LVS	Reset current address to factory address during
 *					device open.  Updated transmit path to post a
 *					single fragment which includes PRH->end of data.
 *		Mar 2000	AC	Did various cleanups for 2.3.x
 *		Jun 2000	jgarzik	PCI and resource alloc cleanups
 *		Jul 2000	tjeerd	Much cleanup and some bug fixes
 *		Sep 2000	tjeerd	Fix leak on unload, cosmetic code cleanup
 *		Feb 2001		Skb allocation fixes
 *		Feb 2001	davej	PCI enable cleanups.
 *		04 Aug 2003	macro	Converted to the DMA API.
 *		14 Aug 2004	macro	Fix device names reported.
 *		14 Jun 2005	macro	Use irqreturn_t.
 *		23 Oct 2006	macro	Big-endian host support.
 *		14 Dec 2006	macro	TURBOchannel support.
 *		01 Jul 2014	macro	Fixes for DMA on 64-bit hosts.
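 *
 * Transmit fragment layout (illustrative):
 *	The padding scheme described under "Transmit Path" above gives the
 *	following byte offsets within the single fragment handed to the
 *	adapter for an 802.2 SNAP frame (offsets are from the start of the
 *	fragment, i.e. skb->data - 3):
 *
 *		 0 ..  2	PRH bytes (held in the 3 "pad" bytes)
 *		 3		FC byte (skb->data points here)
 *		 4 .. 15	destination and source addresses (6 + 6)
 *		16 .. 23	802.2 LLC/SNAP header (8 bytes)
 *		24 .. end	packet data
 *
 *	Since 3 + 21 = 24 is evenly divisible by 4, the data portion starts
 *	on a longword boundary whenever the fragment itself does, which is
 *	the alignment property the NOTE above relies on.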
200 */ 201 202 /* Include files */ 203 #include <linux/bitops.h> 204 #include <linux/compiler.h> 205 #include <linux/delay.h> 206 #include <linux/dma-mapping.h> 207 #include <linux/eisa.h> 208 #include <linux/errno.h> 209 #include <linux/fddidevice.h> 210 #include <linux/interrupt.h> 211 #include <linux/ioport.h> 212 #include <linux/kernel.h> 213 #include <linux/module.h> 214 #include <linux/netdevice.h> 215 #include <linux/pci.h> 216 #include <linux/skbuff.h> 217 #include <linux/slab.h> 218 #include <linux/string.h> 219 #include <linux/tc.h> 220 221 #include <asm/byteorder.h> 222 #include <asm/io.h> 223 224 #include "defxx.h" 225 226 /* Version information string should be updated prior to each new release! */ 227 #define DRV_NAME "defxx" 228 #define DRV_VERSION "v1.11" 229 #define DRV_RELDATE "2014/07/01" 230 231 static char version[] = 232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE 233 " Lawrence V. Stefani and others\n"; 234 235 #define DYNAMIC_BUFFERS 1 236 237 #define SKBUFF_RX_COPYBREAK 200 238 /* 239 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte 240 * alignment for compatibility with old EISA boards. 241 */ 242 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) 243 244 #ifdef CONFIG_EISA 245 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type) 246 #else 247 #define DFX_BUS_EISA(dev) 0 248 #endif 249 250 #ifdef CONFIG_TC 251 #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type) 252 #else 253 #define DFX_BUS_TC(dev) 0 254 #endif 255 256 #ifdef CONFIG_DEFXX_MMIO 257 #define DFX_MMIO 1 258 #else 259 #define DFX_MMIO 0 260 #endif 261 262 /* Define module-wide (static) routines */ 263 264 static void dfx_bus_init(struct net_device *dev); 265 static void dfx_bus_uninit(struct net_device *dev); 266 static void dfx_bus_config_check(DFX_board_t *bp); 267 268 static int dfx_driver_init(struct net_device *dev, 269 const char *print_name, 270 resource_size_t bar_start); 271 static int dfx_adap_init(DFX_board_t *bp, int get_buffers); 272 273 static int dfx_open(struct net_device *dev); 274 static int dfx_close(struct net_device *dev); 275 276 static void dfx_int_pr_halt_id(DFX_board_t *bp); 277 static void dfx_int_type_0_process(DFX_board_t *bp); 278 static void dfx_int_common(struct net_device *dev); 279 static irqreturn_t dfx_interrupt(int irq, void *dev_id); 280 281 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev); 282 static void dfx_ctl_set_multicast_list(struct net_device *dev); 283 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr); 284 static int dfx_ctl_update_cam(DFX_board_t *bp); 285 static int dfx_ctl_update_filters(DFX_board_t *bp); 286 287 static int dfx_hw_dma_cmd_req(DFX_board_t *bp); 288 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data); 289 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type); 290 static int dfx_hw_adap_state_rd(DFX_board_t *bp); 291 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type); 292 293 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); 294 static void dfx_rcv_queue_process(DFX_board_t *bp); 295 #ifdef DYNAMIC_BUFFERS 296 static void dfx_rcv_flush(DFX_board_t *bp); 297 #else 298 static inline void dfx_rcv_flush(DFX_board_t *bp) {} 299 #endif 300 301 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 302 struct net_device *dev); 303 static int dfx_xmt_done(DFX_board_t *bp); 304 static void dfx_xmt_flush(DFX_board_t *bp); 305 306 /* Define module-wide (static) variables */ 307 308 static 
struct pci_driver dfx_pci_driver; 309 static struct eisa_driver dfx_eisa_driver; 310 static struct tc_driver dfx_tc_driver; 311 312 313 /* 314 * ======================= 315 * = dfx_port_write_long = 316 * = dfx_port_read_long = 317 * ======================= 318 * 319 * Overview: 320 * Routines for reading and writing values from/to adapter 321 * 322 * Returns: 323 * None 324 * 325 * Arguments: 326 * bp - pointer to board information 327 * offset - register offset from base I/O address 328 * data - for dfx_port_write_long, this is a value to write; 329 * for dfx_port_read_long, this is a pointer to store 330 * the read value 331 * 332 * Functional Description: 333 * These routines perform the correct operation to read or write 334 * the adapter register. 335 * 336 * EISA port block base addresses are based on the slot number in which the 337 * controller is installed. For example, if the EISA controller is installed 338 * in slot 4, the port block base address is 0x4000. If the controller is 339 * installed in slot 2, the port block base address is 0x2000, and so on. 340 * This port block can be used to access PDQ, ESIC, and DEFEA on-board 341 * registers using the register offsets defined in DEFXX.H. 342 * 343 * PCI port block base addresses are assigned by the PCI BIOS or system 344 * firmware. There is one 128 byte port block which can be accessed. It 345 * allows for I/O mapping of both PDQ and PFI registers using the register 346 * offsets defined in DEFXX.H. 347 * 348 * Return Codes: 349 * None 350 * 351 * Assumptions: 352 * bp->base is a valid base I/O address for this adapter. 353 * offset is a valid register offset for this adapter. 354 * 355 * Side Effects: 356 * Rather than produce macros for these functions, these routines 357 * are defined using "inline" to ensure that the compiler will 358 * generate inline code and not waste a procedure call and return. 359 * This provides all the benefits of macros, but with the 360 * advantage of strict data type checking. 361 */ 362 363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data) 364 { 365 writel(data, bp->base.mem + offset); 366 mb(); 367 } 368 369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) 370 { 371 outl(data, bp->base.port + offset); 372 } 373 374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) 375 { 376 struct device __maybe_unused *bdev = bp->bus_dev; 377 int dfx_bus_tc = DFX_BUS_TC(bdev); 378 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 379 380 if (dfx_use_mmio) 381 dfx_writel(bp, offset, data); 382 else 383 dfx_outl(bp, offset, data); 384 } 385 386 387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data) 388 { 389 mb(); 390 *data = readl(bp->base.mem + offset); 391 } 392 393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) 394 { 395 *data = inl(bp->base.port + offset); 396 } 397 398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) 399 { 400 struct device __maybe_unused *bdev = bp->bus_dev; 401 int dfx_bus_tc = DFX_BUS_TC(bdev); 402 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 403 404 if (dfx_use_mmio) 405 dfx_readl(bp, offset, data); 406 else 407 dfx_inl(bp, offset, data); 408 } 409 410 411 /* 412 * ================ 413 * = dfx_get_bars = 414 * ================ 415 * 416 * Overview: 417 * Retrieves the address range used to access control and status 418 * registers. 
419 * 420 * Returns: 421 * None 422 * 423 * Arguments: 424 * bdev - pointer to device information 425 * bar_start - pointer to store the start address 426 * bar_len - pointer to store the length of the area 427 * 428 * Assumptions: 429 * I am sure there are some. 430 * 431 * Side Effects: 432 * None 433 */ 434 static void dfx_get_bars(struct device *bdev, 435 resource_size_t *bar_start, resource_size_t *bar_len) 436 { 437 int dfx_bus_pci = dev_is_pci(bdev); 438 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 439 int dfx_bus_tc = DFX_BUS_TC(bdev); 440 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 441 442 if (dfx_bus_pci) { 443 int num = dfx_use_mmio ? 0 : 1; 444 445 *bar_start = pci_resource_start(to_pci_dev(bdev), num); 446 *bar_len = pci_resource_len(to_pci_dev(bdev), num); 447 } 448 if (dfx_bus_eisa) { 449 unsigned long base_addr = to_eisa_device(bdev)->base_addr; 450 resource_size_t bar; 451 452 if (dfx_use_mmio) { 453 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2); 454 bar <<= 8; 455 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1); 456 bar <<= 8; 457 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0); 458 bar <<= 16; 459 *bar_start = bar; 460 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2); 461 bar <<= 8; 462 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1); 463 bar <<= 8; 464 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0); 465 bar <<= 16; 466 *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; 467 } else { 468 *bar_start = base_addr; 469 *bar_len = PI_ESIC_K_CSR_IO_LEN; 470 } 471 } 472 if (dfx_bus_tc) { 473 *bar_start = to_tc_dev(bdev)->resource.start + 474 PI_TC_K_CSR_OFFSET; 475 *bar_len = PI_TC_K_CSR_LEN; 476 } 477 } 478 479 static const struct net_device_ops dfx_netdev_ops = { 480 .ndo_open = dfx_open, 481 .ndo_stop = dfx_close, 482 .ndo_start_xmit = dfx_xmt_queue_pkt, 483 .ndo_get_stats = dfx_ctl_get_stats, 484 .ndo_set_rx_mode = dfx_ctl_set_multicast_list, 485 .ndo_set_mac_address = dfx_ctl_set_mac_address, 486 }; 487 488 /* 489 * ================ 490 * = dfx_register = 491 * ================ 492 * 493 * Overview: 494 * Initializes a supported FDDI controller 495 * 496 * Returns: 497 * Condition code 498 * 499 * Arguments: 500 * bdev - pointer to device information 501 * 502 * Functional Description: 503 * 504 * Return Codes: 505 * 0 - This device (fddi0, fddi1, etc) configured successfully 506 * -EBUSY - Failed to get resources, or dfx_driver_init failed. 507 * 508 * Assumptions: 509 * It compiles so it should work :-( (PCI cards do :-) 510 * 511 * Side Effects: 512 * Device structures for FDDI adapters (fddi0, fddi1, etc) are 513 * initialized and the board resources are read and stored in 514 * the device structure. 
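 *
 * Example:
 *   dfx_register() obtains the CSR window through dfx_get_bars().  For an
 *   EISA board decoded through MMIO, the window base is assembled from
 *   three byte-wide memory address compare registers; purely as an
 *   illustration, with hypothetical readings of 0x00, 0x0d and 0x00 from
 *   PI_ESIC_K_MEM_ADD_CMP_2/_1/_0 the code above computes
 *
 *	bar_start = ((0x00 << 16) | (0x0d << 8) | 0x00) << 16 = 0x0d000000
 *
 *   and bar_len is derived the same way from the mask registers, with
 *   PI_MEM_ADD_MASK_M filling in the low-order bits.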
515 */ 516 static int dfx_register(struct device *bdev) 517 { 518 static int version_disp; 519 int dfx_bus_pci = dev_is_pci(bdev); 520 int dfx_bus_tc = DFX_BUS_TC(bdev); 521 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 522 const char *print_name = dev_name(bdev); 523 struct net_device *dev; 524 DFX_board_t *bp; /* board pointer */ 525 resource_size_t bar_start = 0; /* pointer to port */ 526 resource_size_t bar_len = 0; /* resource length */ 527 int alloc_size; /* total buffer size used */ 528 struct resource *region; 529 int err = 0; 530 531 if (!version_disp) { /* display version info if adapter is found */ 532 version_disp = 1; /* set display flag to TRUE so that */ 533 printk(version); /* we only display this string ONCE */ 534 } 535 536 dev = alloc_fddidev(sizeof(*bp)); 537 if (!dev) { 538 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n", 539 print_name); 540 return -ENOMEM; 541 } 542 543 /* Enable PCI device. */ 544 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) { 545 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n", 546 print_name); 547 goto err_out; 548 } 549 550 SET_NETDEV_DEV(dev, bdev); 551 552 bp = netdev_priv(dev); 553 bp->bus_dev = bdev; 554 dev_set_drvdata(bdev, dev); 555 556 dfx_get_bars(bdev, &bar_start, &bar_len); 557 558 if (dfx_use_mmio) 559 region = request_mem_region(bar_start, bar_len, print_name); 560 else 561 region = request_region(bar_start, bar_len, print_name); 562 if (!region) { 563 printk(KERN_ERR "%s: Cannot reserve I/O resource " 564 "0x%lx @ 0x%lx, aborting\n", 565 print_name, (long)bar_len, (long)bar_start); 566 err = -EBUSY; 567 goto err_out_disable; 568 } 569 570 /* Set up I/O base address. */ 571 if (dfx_use_mmio) { 572 bp->base.mem = ioremap_nocache(bar_start, bar_len); 573 if (!bp->base.mem) { 574 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); 575 err = -ENOMEM; 576 goto err_out_region; 577 } 578 } else { 579 bp->base.port = bar_start; 580 dev->base_addr = bar_start; 581 } 582 583 /* Initialize new device structure */ 584 dev->netdev_ops = &dfx_netdev_ops; 585 586 if (dfx_bus_pci) 587 pci_set_master(to_pci_dev(bdev)); 588 589 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) { 590 err = -ENODEV; 591 goto err_out_unmap; 592 } 593 594 err = register_netdev(dev); 595 if (err) 596 goto err_out_kfree; 597 598 printk("%s: registered as %s\n", print_name, dev->name); 599 return 0; 600 601 err_out_kfree: 602 alloc_size = sizeof(PI_DESCR_BLOCK) + 603 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + 604 #ifndef DYNAMIC_BUFFERS 605 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + 606 #endif 607 sizeof(PI_CONSUMER_BLOCK) + 608 (PI_ALIGN_K_DESC_BLK - 1); 609 if (bp->kmalloced) 610 dma_free_coherent(bdev, alloc_size, 611 bp->kmalloced, bp->kmalloced_dma); 612 613 err_out_unmap: 614 if (dfx_use_mmio) 615 iounmap(bp->base.mem); 616 617 err_out_region: 618 if (dfx_use_mmio) 619 release_mem_region(bar_start, bar_len); 620 else 621 release_region(bar_start, bar_len); 622 623 err_out_disable: 624 if (dfx_bus_pci) 625 pci_disable_device(to_pci_dev(bdev)); 626 627 err_out: 628 free_netdev(dev); 629 return err; 630 } 631 632 633 /* 634 * ================ 635 * = dfx_bus_init = 636 * ================ 637 * 638 * Overview: 639 * Initializes the bus-specific controller logic. 640 * 641 * Returns: 642 * None 643 * 644 * Arguments: 645 * dev - pointer to device information 646 * 647 * Functional Description: 648 * Determine and save adapter IRQ in device table, 649 * then perform bus-specific logic initialization. 
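 *
 *   Illustration of the EISA decode set-up performed below: as noted in
 *   the register access comments earlier in this file, a DEFEA in slot 4
 *   has its CSR I/O block at 0x4000, so for that board
 *
 *	val = (0x4000 >> 12) << PI_IO_CMP_V_SLOT	(slot number 4)
 *
 *   recovers the slot number shifted into the position expected by the
 *   high byte of the ESIC I/O address compare registers.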
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *   base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are enabled at the adapter bus-specific logic.
 *   Note: Interrupts at the DMA engine (PDQ chip) are not
 *   enabled yet.
 */

static void dfx_bus_init(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	u8 val;

	DBG_printk("In dfx_bus_init...\n");

	/* Initialize a pointer back to the net_device struct */
	bp->dev = dev;

	/* Initialize adapter based on bus type */

	if (dfx_bus_tc)
		dev->irq = to_tc_dev(bdev)->interrupt;
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Get the interrupt level from the ESIC chip. */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= PI_CONFIG_STAT_0_M_IRQ;
		val >>= PI_CONFIG_STAT_0_V_IRQ;

		switch (val) {
		case PI_CONFIG_STAT_0_IRQ_K_9:
			dev->irq = 9;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_10:
			dev->irq = 10;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_11:
			dev->irq = 11;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_15:
			dev->irq = 15;
			break;
		}

		/*
		 * Enable memory decoding (MEMCS0) and/or port decoding
		 * (IOCS1/IOCS0) as appropriate in the Function Control
		 * Register.  One of the port chip selects seems to be
		 * used for the Burst Holdoff register, but this bit of
		 * documentation is missing and as yet it has not been
		 * determined which of the two.  This is also the reason
		 * the size of the decoded port range is twice as large
		 * as the one required by the PDQ.
		 */

		/* Set the decode range of the board. */
		val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
		val = PI_ESIC_K_CSR_IO_LEN - 1;
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);

		/* Enable the decoders. */
		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
		if (dfx_use_mmio)
			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);

		/*
		 * Enable access to the rest of the module
		 * (including PDQ and packet memory).
		 */
		val = PI_SLOT_CNTRL_M_ENB;
		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/*
		 * Map PDQ registers into memory or port space.  This is
		 * done with a bit in the Burst Holdoff register.
		 */
		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
		if (dfx_use_mmio)
			val |= PI_BURST_HOLDOFF_V_MEM_MAP;
		else
			val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);

		/* Enable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		struct pci_dev *pdev = to_pci_dev(bdev);

		/* Get the interrupt level from the PCI Configuration Table */

		dev->irq = pdev->irq;

		/* Check Latency Timer and set if less than minimal */

		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
		if (val < PFI_K_LAT_TIMER_MIN) {
			val = PFI_K_LAT_TIMER_DEF;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
		}

		/* Enable interrupts at PCI bus interface chip (PFI) */
		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
	}
}

/*
 * ==================
 * = dfx_bus_uninit =
 * ==================
 *
 * Overview:
 *   Uninitializes the bus-specific controller logic.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Perform bus-specific logic uninitialization.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *   base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are disabled at the adapter bus-specific logic.
 */

static void dfx_bus_uninit(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = dev_is_pci(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	u8 val;

	DBG_printk("In dfx_bus_uninit...\n");

	/* Uninitialize adapter based on bus type */

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Disable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		/* Disable interrupts at PCI bus interface chip (PFI) */
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
	}
}


/*
 * ========================
 * = dfx_bus_config_check =
 * ========================
 *
 * Overview:
 *   Checks the configuration (burst size, full-duplex, etc.).  If any
 *   parameters are illegal, then this routine will set new defaults.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
 *   PDQ, and all FDDI PCI controllers, all values are legal.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   dfx_adap_init has NOT been called yet so burst size and other items have
 *   not been set.
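 *
 * Example:
 *   Illustrative outcome of the check below: on a Revision 2 DEFEA whose
 *   PDQ reports revision D (the PDQ_REV_GET sub-command returns 2), or
 *   whose revision cannot be read at all, a configured burst size of
 *   PI_PDATA_B_DMA_BURST_SIZE_16 or _32 is reduced to
 *   PI_PDATA_B_DMA_BURST_SIZE_8 and full-duplex mode is forced off; all
 *   other adapter/PDQ combinations keep their settings.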
862 * 863 * Side Effects: 864 * None 865 */ 866 867 static void dfx_bus_config_check(DFX_board_t *bp) 868 { 869 struct device __maybe_unused *bdev = bp->bus_dev; 870 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 871 int status; /* return code from adapter port control call */ 872 u32 host_data; /* LW data returned from port control call */ 873 874 DBG_printk("In dfx_bus_config_check...\n"); 875 876 /* Configuration check only valid for EISA adapter */ 877 878 if (dfx_bus_eisa) { 879 /* 880 * First check if revision 2 EISA controller. Rev. 1 cards used 881 * PDQ revision B, so no workaround needed in this case. Rev. 3 882 * cards used PDQ revision E, so no workaround needed in this 883 * case, either. Only Rev. 2 cards used either Rev. D or E 884 * chips, so we must verify the chip revision on Rev. 2 cards. 885 */ 886 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) { 887 /* 888 * Revision 2 FDDI EISA controller found, 889 * so let's check PDQ revision of adapter. 890 */ 891 status = dfx_hw_port_ctrl_req(bp, 892 PI_PCTRL_M_SUB_CMD, 893 PI_SUB_CMD_K_PDQ_REV_GET, 894 0, 895 &host_data); 896 if ((status != DFX_K_SUCCESS) || (host_data == 2)) 897 { 898 /* 899 * Either we couldn't determine the PDQ revision, or 900 * we determined that it is at revision D. In either case, 901 * we need to implement the workaround. 902 */ 903 904 /* Ensure that the burst size is set to 8 longwords or less */ 905 906 switch (bp->burst_size) 907 { 908 case PI_PDATA_B_DMA_BURST_SIZE_32: 909 case PI_PDATA_B_DMA_BURST_SIZE_16: 910 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8; 911 break; 912 913 default: 914 break; 915 } 916 917 /* Ensure that full-duplex mode is not enabled */ 918 919 bp->full_duplex_enb = PI_SNMP_K_FALSE; 920 } 921 } 922 } 923 } 924 925 926 /* 927 * =================== 928 * = dfx_driver_init = 929 * =================== 930 * 931 * Overview: 932 * Initializes remaining adapter board structure information 933 * and makes sure adapter is in a safe state prior to dfx_open(). 934 * 935 * Returns: 936 * Condition code 937 * 938 * Arguments: 939 * dev - pointer to device information 940 * print_name - printable device name 941 * 942 * Functional Description: 943 * This function allocates additional resources such as the host memory 944 * blocks needed by the adapter (eg. descriptor and consumer blocks). 945 * Remaining bus initialization steps are also completed. The adapter 946 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS 947 * must call dfx_open() to open the adapter and bring it on-line. 948 * 949 * Return Codes: 950 * DFX_K_SUCCESS - initialization succeeded 951 * DFX_K_FAILURE - initialization failed - could not allocate memory 952 * or read adapter MAC address 953 * 954 * Assumptions: 955 * Memory allocated from pci_alloc_consistent() call is physically 956 * contiguous, locked memory. 957 * 958 * Side Effects: 959 * Adapter is reset and should be in DMA_UNAVAILABLE state before 960 * returning from this routine. 
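 *
 * Example:
 *   The factory MLA is fetched with two port control requests and stored
 *   little-endian, low longword first.  Purely as an illustration with
 *   hypothetical register contents: if PI_PDATA_A_MLA_K_LO returns
 *   0x002b0008 and PI_PDATA_A_MLA_K_HI returns 0x0000116a, the bytes
 *   stored in factory_mac_addr are 08-00-2b-00 followed by 6a-11, i.e.
 *   the address 08:00:2b:00:6a:11.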
961 */ 962 963 static int dfx_driver_init(struct net_device *dev, const char *print_name, 964 resource_size_t bar_start) 965 { 966 DFX_board_t *bp = netdev_priv(dev); 967 struct device *bdev = bp->bus_dev; 968 int dfx_bus_pci = dev_is_pci(bdev); 969 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 970 int dfx_bus_tc = DFX_BUS_TC(bdev); 971 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 972 int alloc_size; /* total buffer size needed */ 973 char *top_v, *curr_v; /* virtual addrs into memory block */ 974 dma_addr_t top_p, curr_p; /* physical addrs into memory block */ 975 u32 data; /* host data register value */ 976 __le32 le32; 977 char *board_name = NULL; 978 979 DBG_printk("In dfx_driver_init...\n"); 980 981 /* Initialize bus-specific hardware registers */ 982 983 dfx_bus_init(dev); 984 985 /* 986 * Initialize default values for configurable parameters 987 * 988 * Note: All of these parameters are ones that a user may 989 * want to customize. It'd be nice to break these 990 * out into Space.c or someplace else that's more 991 * accessible/understandable than this file. 992 */ 993 994 bp->full_duplex_enb = PI_SNMP_K_FALSE; 995 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */ 996 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF; 997 bp->rcv_bufs_to_post = RCV_BUFS_DEF; 998 999 /* 1000 * Ensure that HW configuration is OK 1001 * 1002 * Note: Depending on the hardware revision, we may need to modify 1003 * some of the configurable parameters to workaround hardware 1004 * limitations. We'll perform this configuration check AFTER 1005 * setting the parameters to their default values. 1006 */ 1007 1008 dfx_bus_config_check(bp); 1009 1010 /* Disable PDQ interrupts first */ 1011 1012 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); 1013 1014 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */ 1015 1016 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); 1017 1018 /* Read the factory MAC address from the adapter then save it */ 1019 1020 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0, 1021 &data) != DFX_K_SUCCESS) { 1022 printk("%s: Could not read adapter factory MAC address!\n", 1023 print_name); 1024 return DFX_K_FAILURE; 1025 } 1026 le32 = cpu_to_le32(data); 1027 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32)); 1028 1029 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, 1030 &data) != DFX_K_SUCCESS) { 1031 printk("%s: Could not read adapter factory MAC address!\n", 1032 print_name); 1033 return DFX_K_FAILURE; 1034 } 1035 le32 = cpu_to_le32(data); 1036 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16)); 1037 1038 /* 1039 * Set current address to factory address 1040 * 1041 * Note: Node address override support is handled through 1042 * dfx_ctl_set_mac_address. 1043 */ 1044 1045 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); 1046 if (dfx_bus_tc) 1047 board_name = "DEFTA"; 1048 if (dfx_bus_eisa) 1049 board_name = "DEFEA"; 1050 if (dfx_bus_pci) 1051 board_name = "DEFPA"; 1052 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n", 1053 print_name, board_name, dfx_use_mmio ? "" : "I/O ", 1054 (long long)bar_start, dev->irq, dev->dev_addr); 1055 1056 /* 1057 * Get memory for descriptor block, consumer block, and other buffers 1058 * that need to be DMA read or written to by the adapter. 
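	 *
	 * Illustration of the alignment carve-up below, using a hypothetical
	 * DMA address: if dma_zalloc_coherent() returns a block whose
	 * kmalloced_dma address is 0x12340040, then
	 *
	 *	curr_p = ALIGN(0x12340040, PI_ALIGN_K_DESC_BLK) = 0x12342000
	 *
	 * (assuming the customary 8 KB alignment value), curr_v is advanced
	 * by the same 0x1fc0 bytes, and at most PI_ALIGN_K_DESC_BLK - 1
	 * bytes of the allocation are skipped -- exactly the slack included
	 * in alloc_size.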
1059 */ 1060 1061 alloc_size = sizeof(PI_DESCR_BLOCK) + 1062 PI_CMD_REQ_K_SIZE_MAX + 1063 PI_CMD_RSP_K_SIZE_MAX + 1064 #ifndef DYNAMIC_BUFFERS 1065 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + 1066 #endif 1067 sizeof(PI_CONSUMER_BLOCK) + 1068 (PI_ALIGN_K_DESC_BLK - 1); 1069 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, 1070 &bp->kmalloced_dma, 1071 GFP_ATOMIC); 1072 if (top_v == NULL) 1073 return DFX_K_FAILURE; 1074 1075 top_p = bp->kmalloced_dma; /* get physical address of buffer */ 1076 1077 /* 1078 * To guarantee the 8K alignment required for the descriptor block, 8K - 1 1079 * plus the amount of memory needed was allocated. The physical address 1080 * is now 8K aligned. By carving up the memory in a specific order, 1081 * we'll guarantee the alignment requirements for all other structures. 1082 * 1083 * Note: If the assumptions change regarding the non-paged, non-cached, 1084 * physically contiguous nature of the memory block or the address 1085 * alignments, then we'll need to implement a different algorithm 1086 * for allocating the needed memory. 1087 */ 1088 1089 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK); 1090 curr_v = top_v + (curr_p - top_p); 1091 1092 /* Reserve space for descriptor block */ 1093 1094 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v; 1095 bp->descr_block_phys = curr_p; 1096 curr_v += sizeof(PI_DESCR_BLOCK); 1097 curr_p += sizeof(PI_DESCR_BLOCK); 1098 1099 /* Reserve space for command request buffer */ 1100 1101 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v; 1102 bp->cmd_req_phys = curr_p; 1103 curr_v += PI_CMD_REQ_K_SIZE_MAX; 1104 curr_p += PI_CMD_REQ_K_SIZE_MAX; 1105 1106 /* Reserve space for command response buffer */ 1107 1108 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v; 1109 bp->cmd_rsp_phys = curr_p; 1110 curr_v += PI_CMD_RSP_K_SIZE_MAX; 1111 curr_p += PI_CMD_RSP_K_SIZE_MAX; 1112 1113 /* Reserve space for the LLC host receive queue buffers */ 1114 1115 bp->rcv_block_virt = curr_v; 1116 bp->rcv_block_phys = curr_p; 1117 1118 #ifndef DYNAMIC_BUFFERS 1119 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); 1120 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); 1121 #endif 1122 1123 /* Reserve space for the consumer block */ 1124 1125 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v; 1126 bp->cons_block_phys = curr_p; 1127 1128 /* Display virtual and physical addresses if debug driver */ 1129 1130 DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n", 1131 print_name, bp->descr_block_virt, &bp->descr_block_phys); 1132 DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n", 1133 print_name, bp->cmd_req_virt, &bp->cmd_req_phys); 1134 DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n", 1135 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys); 1136 DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n", 1137 print_name, bp->rcv_block_virt, &bp->rcv_block_phys); 1138 DBG_printk("%s: Consumer block virt = %p, phys = %pad\n", 1139 print_name, bp->cons_block_virt, &bp->cons_block_phys); 1140 1141 return DFX_K_SUCCESS; 1142 } 1143 1144 1145 /* 1146 * ================= 1147 * = dfx_adap_init = 1148 * ================= 1149 * 1150 * Overview: 1151 * Brings the adapter to the link avail/link unavailable state. 
1152 * 1153 * Returns: 1154 * Condition code 1155 * 1156 * Arguments: 1157 * bp - pointer to board information 1158 * get_buffers - non-zero if buffers to be allocated 1159 * 1160 * Functional Description: 1161 * Issues the low-level firmware/hardware calls necessary to bring 1162 * the adapter up, or to properly reset and restore adapter during 1163 * run-time. 1164 * 1165 * Return Codes: 1166 * DFX_K_SUCCESS - Adapter brought up successfully 1167 * DFX_K_FAILURE - Adapter initialization failed 1168 * 1169 * Assumptions: 1170 * bp->reset_type should be set to a valid reset type value before 1171 * calling this routine. 1172 * 1173 * Side Effects: 1174 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state 1175 * upon a successful return of this routine. 1176 */ 1177 1178 static int dfx_adap_init(DFX_board_t *bp, int get_buffers) 1179 { 1180 DBG_printk("In dfx_adap_init...\n"); 1181 1182 /* Disable PDQ interrupts first */ 1183 1184 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); 1185 1186 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */ 1187 1188 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS) 1189 { 1190 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name); 1191 return DFX_K_FAILURE; 1192 } 1193 1194 /* 1195 * When the PDQ is reset, some false Type 0 interrupts may be pending, 1196 * so we'll acknowledge all Type 0 interrupts now before continuing. 1197 */ 1198 1199 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0); 1200 1201 /* 1202 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state 1203 * 1204 * Note: We only need to clear host copies of these registers. The PDQ reset 1205 * takes care of the on-board register values. 1206 */ 1207 1208 bp->cmd_req_reg.lword = 0; 1209 bp->cmd_rsp_reg.lword = 0; 1210 bp->rcv_xmt_reg.lword = 0; 1211 1212 /* Clear consumer block before going to DMA_AVAILABLE state */ 1213 1214 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); 1215 1216 /* Initialize the DMA Burst Size */ 1217 1218 if (dfx_hw_port_ctrl_req(bp, 1219 PI_PCTRL_M_SUB_CMD, 1220 PI_SUB_CMD_K_BURST_SIZE_SET, 1221 bp->burst_size, 1222 NULL) != DFX_K_SUCCESS) 1223 { 1224 printk("%s: Could not set adapter burst size!\n", bp->dev->name); 1225 return DFX_K_FAILURE; 1226 } 1227 1228 /* 1229 * Set base address of Consumer Block 1230 * 1231 * Assumption: 32-bit physical address of consumer block is 64 byte 1232 * aligned. That is, bits 0-5 of the address must be zero. 1233 */ 1234 1235 if (dfx_hw_port_ctrl_req(bp, 1236 PI_PCTRL_M_CONS_BLOCK, 1237 bp->cons_block_phys, 1238 0, 1239 NULL) != DFX_K_SUCCESS) 1240 { 1241 printk("%s: Could not set consumer block address!\n", bp->dev->name); 1242 return DFX_K_FAILURE; 1243 } 1244 1245 /* 1246 * Set the base address of Descriptor Block and bring adapter 1247 * to DMA_AVAILABLE state. 1248 * 1249 * Note: We also set the literal and data swapping requirements 1250 * in this command. 1251 * 1252 * Assumption: 32-bit physical address of descriptor block 1253 * is 8Kbyte aligned. 
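	 *
	 * (That alignment is what the ALIGN(top_p, PI_ALIGN_K_DESC_BLK)
	 * carve-up in dfx_driver_init() provides; it presumably also keeps
	 * the low-order bits of descr_block_phys clear so that the
	 * PI_PDATA_A_INIT_M_BSWAP_INIT flag can be OR'ed into the same
	 * longword below.)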
1254 */ 1255 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT, 1256 (u32)(bp->descr_block_phys | 1257 PI_PDATA_A_INIT_M_BSWAP_INIT), 1258 0, NULL) != DFX_K_SUCCESS) { 1259 printk("%s: Could not set descriptor block address!\n", 1260 bp->dev->name); 1261 return DFX_K_FAILURE; 1262 } 1263 1264 /* Set transmit flush timeout value */ 1265 1266 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET; 1267 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME; 1268 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */ 1269 bp->cmd_req_virt->char_set.item[0].item_index = 0; 1270 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL; 1271 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 1272 { 1273 printk("%s: DMA command request failed!\n", bp->dev->name); 1274 return DFX_K_FAILURE; 1275 } 1276 1277 /* Set the initial values for eFDXEnable and MACTReq MIB objects */ 1278 1279 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET; 1280 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS; 1281 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb; 1282 bp->cmd_req_virt->snmp_set.item[0].item_index = 0; 1283 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ; 1284 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt; 1285 bp->cmd_req_virt->snmp_set.item[1].item_index = 0; 1286 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL; 1287 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 1288 { 1289 printk("%s: DMA command request failed!\n", bp->dev->name); 1290 return DFX_K_FAILURE; 1291 } 1292 1293 /* Initialize adapter CAM */ 1294 1295 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 1296 { 1297 printk("%s: Adapter CAM update failed!\n", bp->dev->name); 1298 return DFX_K_FAILURE; 1299 } 1300 1301 /* Initialize adapter filters */ 1302 1303 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 1304 { 1305 printk("%s: Adapter filters update failed!\n", bp->dev->name); 1306 return DFX_K_FAILURE; 1307 } 1308 1309 /* 1310 * Remove any existing dynamic buffers (i.e. if the adapter is being 1311 * reinitialized) 1312 */ 1313 1314 if (get_buffers) 1315 dfx_rcv_flush(bp); 1316 1317 /* Initialize receive descriptor block and produce buffers */ 1318 1319 if (dfx_rcv_init(bp, get_buffers)) 1320 { 1321 printk("%s: Receive buffer allocation failed\n", bp->dev->name); 1322 if (get_buffers) 1323 dfx_rcv_flush(bp); 1324 return DFX_K_FAILURE; 1325 } 1326 1327 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */ 1328 1329 bp->cmd_req_virt->cmd_type = PI_CMD_K_START; 1330 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 1331 { 1332 printk("%s: Start command failed\n", bp->dev->name); 1333 if (get_buffers) 1334 dfx_rcv_flush(bp); 1335 return DFX_K_FAILURE; 1336 } 1337 1338 /* Initialization succeeded, reenable PDQ interrupts */ 1339 1340 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS); 1341 return DFX_K_SUCCESS; 1342 } 1343 1344 1345 /* 1346 * ============ 1347 * = dfx_open = 1348 * ============ 1349 * 1350 * Overview: 1351 * Opens the adapter 1352 * 1353 * Returns: 1354 * Condition code 1355 * 1356 * Arguments: 1357 * dev - pointer to device information 1358 * 1359 * Functional Description: 1360 * This function brings the adapter to an operational state. 1361 * 1362 * Return Codes: 1363 * 0 - Adapter was successfully opened 1364 * -EAGAIN - Could not register IRQ or adapter initialization failed 1365 * 1366 * Assumptions: 1367 * This routine should only be called for a device that was 1368 * initialized successfully. 
1369 * 1370 * Side Effects: 1371 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state 1372 * if the open is successful. 1373 */ 1374 1375 static int dfx_open(struct net_device *dev) 1376 { 1377 DFX_board_t *bp = netdev_priv(dev); 1378 int ret; 1379 1380 DBG_printk("In dfx_open...\n"); 1381 1382 /* Register IRQ - support shared interrupts by passing device ptr */ 1383 1384 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name, 1385 dev); 1386 if (ret) { 1387 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); 1388 return ret; 1389 } 1390 1391 /* 1392 * Set current address to factory MAC address 1393 * 1394 * Note: We've already done this step in dfx_driver_init. 1395 * However, it's possible that a user has set a node 1396 * address override, then closed and reopened the 1397 * adapter. Unless we reset the device address field 1398 * now, we'll continue to use the existing modified 1399 * address. 1400 */ 1401 1402 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); 1403 1404 /* Clear local unicast/multicast address tables and counts */ 1405 1406 memset(bp->uc_table, 0, sizeof(bp->uc_table)); 1407 memset(bp->mc_table, 0, sizeof(bp->mc_table)); 1408 bp->uc_count = 0; 1409 bp->mc_count = 0; 1410 1411 /* Disable promiscuous filter settings */ 1412 1413 bp->ind_group_prom = PI_FSTATE_K_BLOCK; 1414 bp->group_prom = PI_FSTATE_K_BLOCK; 1415 1416 spin_lock_init(&bp->lock); 1417 1418 /* Reset and initialize adapter */ 1419 1420 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */ 1421 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS) 1422 { 1423 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name); 1424 free_irq(dev->irq, dev); 1425 return -EAGAIN; 1426 } 1427 1428 /* Set device structure info */ 1429 netif_start_queue(dev); 1430 return 0; 1431 } 1432 1433 1434 /* 1435 * ============= 1436 * = dfx_close = 1437 * ============= 1438 * 1439 * Overview: 1440 * Closes the device/module. 1441 * 1442 * Returns: 1443 * Condition code 1444 * 1445 * Arguments: 1446 * dev - pointer to device information 1447 * 1448 * Functional Description: 1449 * This routine closes the adapter and brings it to a safe state. 1450 * The interrupt service routine is deregistered with the OS. 1451 * The adapter can be opened again with another call to dfx_open(). 1452 * 1453 * Return Codes: 1454 * Always return 0. 1455 * 1456 * Assumptions: 1457 * No further requests for this adapter are made after this routine is 1458 * called. dfx_open() can be called to reset and reinitialize the 1459 * adapter. 1460 * 1461 * Side Effects: 1462 * Adapter should be in DMA_UNAVAILABLE state upon completion of this 1463 * routine. 1464 */ 1465 1466 static int dfx_close(struct net_device *dev) 1467 { 1468 DFX_board_t *bp = netdev_priv(dev); 1469 1470 DBG_printk("In dfx_close...\n"); 1471 1472 /* Disable PDQ interrupts first */ 1473 1474 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); 1475 1476 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */ 1477 1478 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); 1479 1480 /* 1481 * Flush any pending transmit buffers 1482 * 1483 * Note: It's important that we flush the transmit buffers 1484 * BEFORE we clear our copy of the Type 2 register. 1485 * Otherwise, we'll have no idea how many buffers 1486 * we need to free. 
1487 */ 1488 1489 dfx_xmt_flush(bp); 1490 1491 /* 1492 * Clear Type 1 and Type 2 registers after adapter reset 1493 * 1494 * Note: Even though we're closing the adapter, it's 1495 * possible that an interrupt will occur after 1496 * dfx_close is called. Without some assurance to 1497 * the contrary we want to make sure that we don't 1498 * process receive and transmit LLC frames and update 1499 * the Type 2 register with bad information. 1500 */ 1501 1502 bp->cmd_req_reg.lword = 0; 1503 bp->cmd_rsp_reg.lword = 0; 1504 bp->rcv_xmt_reg.lword = 0; 1505 1506 /* Clear consumer block for the same reason given above */ 1507 1508 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); 1509 1510 /* Release all dynamically allocate skb in the receive ring. */ 1511 1512 dfx_rcv_flush(bp); 1513 1514 /* Clear device structure flags */ 1515 1516 netif_stop_queue(dev); 1517 1518 /* Deregister (free) IRQ */ 1519 1520 free_irq(dev->irq, dev); 1521 1522 return 0; 1523 } 1524 1525 1526 /* 1527 * ====================== 1528 * = dfx_int_pr_halt_id = 1529 * ====================== 1530 * 1531 * Overview: 1532 * Displays halt id's in string form. 1533 * 1534 * Returns: 1535 * None 1536 * 1537 * Arguments: 1538 * bp - pointer to board information 1539 * 1540 * Functional Description: 1541 * Determine current halt id and display appropriate string. 1542 * 1543 * Return Codes: 1544 * None 1545 * 1546 * Assumptions: 1547 * None 1548 * 1549 * Side Effects: 1550 * None 1551 */ 1552 1553 static void dfx_int_pr_halt_id(DFX_board_t *bp) 1554 { 1555 PI_UINT32 port_status; /* PDQ port status register value */ 1556 PI_UINT32 halt_id; /* PDQ port status halt ID */ 1557 1558 /* Read the latest port status */ 1559 1560 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 1561 1562 /* Display halt state transition information */ 1563 1564 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID; 1565 switch (halt_id) 1566 { 1567 case PI_HALT_ID_K_SELFTEST_TIMEOUT: 1568 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name); 1569 break; 1570 1571 case PI_HALT_ID_K_PARITY_ERROR: 1572 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name); 1573 break; 1574 1575 case PI_HALT_ID_K_HOST_DIR_HALT: 1576 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name); 1577 break; 1578 1579 case PI_HALT_ID_K_SW_FAULT: 1580 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name); 1581 break; 1582 1583 case PI_HALT_ID_K_HW_FAULT: 1584 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name); 1585 break; 1586 1587 case PI_HALT_ID_K_PC_TRACE: 1588 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name); 1589 break; 1590 1591 case PI_HALT_ID_K_DMA_ERROR: 1592 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name); 1593 break; 1594 1595 case PI_HALT_ID_K_IMAGE_CRC_ERROR: 1596 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name); 1597 break; 1598 1599 case PI_HALT_ID_K_BUS_EXCEPTION: 1600 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name); 1601 break; 1602 1603 default: 1604 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id); 1605 break; 1606 } 1607 } 1608 1609 1610 /* 1611 * ========================== 1612 * = dfx_int_type_0_process = 1613 * ========================== 1614 * 1615 * Overview: 1616 * Processes Type 0 interrupts. 1617 * 1618 * Returns: 1619 * None 1620 * 1621 * Arguments: 1622 * bp - pointer to board information 1623 * 1624 * Functional Description: 1625 * Processes all enabled Type 0 interrupts. 
If the reason for the interrupt 1626 * is a serious fault on the adapter, then an error message is displayed 1627 * and the adapter is reset. 1628 * 1629 * One tricky potential timing window is the rapid succession of "link avail" 1630 * "link unavail" state change interrupts. The acknowledgement of the Type 0 1631 * interrupt must be done before reading the state from the Port Status 1632 * register. This is true because a state change could occur after reading 1633 * the data, but before acknowledging the interrupt. If this state change 1634 * does happen, it would be lost because the driver is using the old state, 1635 * and it will never know about the new state because it subsequently 1636 * acknowledges the state change interrupt. 1637 * 1638 * INCORRECT CORRECT 1639 * read type 0 int reasons read type 0 int reasons 1640 * read adapter state ack type 0 interrupts 1641 * ack type 0 interrupts read adapter state 1642 * ... process interrupt ... ... process interrupt ... 1643 * 1644 * Return Codes: 1645 * None 1646 * 1647 * Assumptions: 1648 * None 1649 * 1650 * Side Effects: 1651 * An adapter reset may occur if the adapter has any Type 0 error interrupts 1652 * or if the port status indicates that the adapter is halted. The driver 1653 * is responsible for reinitializing the adapter with the current CAM 1654 * contents and adapter filter settings. 1655 */ 1656 1657 static void dfx_int_type_0_process(DFX_board_t *bp) 1658 1659 { 1660 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */ 1661 PI_UINT32 state; /* current adap state (from port status) */ 1662 1663 /* 1664 * Read host interrupt Type 0 register to determine which Type 0 1665 * interrupts are pending. Immediately write it back out to clear 1666 * those interrupts. 1667 */ 1668 1669 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status); 1670 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status); 1671 1672 /* Check for Type 0 error interrupts */ 1673 1674 if (type_0_status & (PI_TYPE_0_STAT_M_NXM | 1675 PI_TYPE_0_STAT_M_PM_PAR_ERR | 1676 PI_TYPE_0_STAT_M_BUS_PAR_ERR)) 1677 { 1678 /* Check for Non-Existent Memory error */ 1679 1680 if (type_0_status & PI_TYPE_0_STAT_M_NXM) 1681 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name); 1682 1683 /* Check for Packet Memory Parity error */ 1684 1685 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR) 1686 printk("%s: Packet Memory Parity Error\n", bp->dev->name); 1687 1688 /* Check for Host Bus Parity error */ 1689 1690 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR) 1691 printk("%s: Host Bus Parity Error\n", bp->dev->name); 1692 1693 /* Reset adapter and bring it back on-line */ 1694 1695 bp->link_available = PI_K_FALSE; /* link is no longer available */ 1696 bp->reset_type = 0; /* rerun on-board diagnostics */ 1697 printk("%s: Resetting adapter...\n", bp->dev->name); 1698 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) 1699 { 1700 printk("%s: Adapter reset failed! 
Disabling adapter interrupts.\n", bp->dev->name); 1701 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); 1702 return; 1703 } 1704 printk("%s: Adapter reset successful!\n", bp->dev->name); 1705 return; 1706 } 1707 1708 /* Check for transmit flush interrupt */ 1709 1710 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH) 1711 { 1712 /* Flush any pending xmt's and acknowledge the flush interrupt */ 1713 1714 bp->link_available = PI_K_FALSE; /* link is no longer available */ 1715 dfx_xmt_flush(bp); /* flush any outstanding packets */ 1716 (void) dfx_hw_port_ctrl_req(bp, 1717 PI_PCTRL_M_XMT_DATA_FLUSH_DONE, 1718 0, 1719 0, 1720 NULL); 1721 } 1722 1723 /* Check for adapter state change */ 1724 1725 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE) 1726 { 1727 /* Get latest adapter state */ 1728 1729 state = dfx_hw_adap_state_rd(bp); /* get adapter state */ 1730 if (state == PI_STATE_K_HALTED) 1731 { 1732 /* 1733 * Adapter has transitioned to HALTED state, try to reset 1734 * adapter to bring it back on-line. If reset fails, 1735 * leave the adapter in the broken state. 1736 */ 1737 1738 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name); 1739 dfx_int_pr_halt_id(bp); /* display halt id as string */ 1740 1741 /* Reset adapter and bring it back on-line */ 1742 1743 bp->link_available = PI_K_FALSE; /* link is no longer available */ 1744 bp->reset_type = 0; /* rerun on-board diagnostics */ 1745 printk("%s: Resetting adapter...\n", bp->dev->name); 1746 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) 1747 { 1748 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name); 1749 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); 1750 return; 1751 } 1752 printk("%s: Adapter reset successful!\n", bp->dev->name); 1753 } 1754 else if (state == PI_STATE_K_LINK_AVAIL) 1755 { 1756 bp->link_available = PI_K_TRUE; /* set link available flag */ 1757 } 1758 } 1759 } 1760 1761 1762 /* 1763 * ================== 1764 * = dfx_int_common = 1765 * ================== 1766 * 1767 * Overview: 1768 * Interrupt service routine (ISR) 1769 * 1770 * Returns: 1771 * None 1772 * 1773 * Arguments: 1774 * bp - pointer to board information 1775 * 1776 * Functional Description: 1777 * This is the ISR which processes incoming adapter interrupts. 1778 * 1779 * Return Codes: 1780 * None 1781 * 1782 * Assumptions: 1783 * This routine assumes PDQ interrupts have not been disabled. 1784 * When interrupts are disabled at the PDQ, the Port Status register 1785 * is automatically cleared. This routine uses the Port Status 1786 * register value to determine whether a Type 0 interrupt occurred, 1787 * so it's important that adapter interrupts are not normally 1788 * enabled/disabled at the PDQ. 1789 * 1790 * It's vital that this routine is NOT reentered for the 1791 * same board and that the OS is not in another section of 1792 * code (eg. dfx_xmt_queue_pkt) for the same board on a 1793 * different thread. 1794 * 1795 * Side Effects: 1796 * Pending interrupts are serviced. Depending on the type of 1797 * interrupt, acknowledging and clearing the interrupt at the 1798 * PDQ involves writing a register to clear the interrupt bit 1799 * or updating completion indices. 
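 *
 * Example:
 *   The non-reentrancy assumption above is satisfied by the callers: as
 *   dfx_interrupt() below shows, every bus variant wraps the call to this
 *   routine in
 *
 *	spin_lock(&bp->lock);
 *	dfx_int_common(dev);
 *	spin_unlock(&bp->lock);
 *
 *   and the transmit path is expected to take the same per-board lock, so
 *   this routine never runs concurrently with itself or with
 *   dfx_xmt_queue_pkt() for a given board.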
1800 */ 1801 1802 static void dfx_int_common(struct net_device *dev) 1803 { 1804 DFX_board_t *bp = netdev_priv(dev); 1805 PI_UINT32 port_status; /* Port Status register */ 1806 1807 /* Process xmt interrupts - frequent case, so always call this routine */ 1808 1809 if(dfx_xmt_done(bp)) /* free consumed xmt packets */ 1810 netif_wake_queue(dev); 1811 1812 /* Process rcv interrupts - frequent case, so always call this routine */ 1813 1814 dfx_rcv_queue_process(bp); /* service received LLC frames */ 1815 1816 /* 1817 * Transmit and receive producer and completion indices are updated on the 1818 * adapter by writing to the Type 2 Producer register. Since the frequent 1819 * case is that we'll be processing either LLC transmit or receive buffers, 1820 * we'll optimize I/O writes by doing a single register write here. 1821 */ 1822 1823 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 1824 1825 /* Read PDQ Port Status register to find out which interrupts need processing */ 1826 1827 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 1828 1829 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */ 1830 1831 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING) 1832 dfx_int_type_0_process(bp); /* process Type 0 interrupts */ 1833 } 1834 1835 1836 /* 1837 * ================= 1838 * = dfx_interrupt = 1839 * ================= 1840 * 1841 * Overview: 1842 * Interrupt processing routine 1843 * 1844 * Returns: 1845 * Whether a valid interrupt was seen. 1846 * 1847 * Arguments: 1848 * irq - interrupt vector 1849 * dev_id - pointer to device information 1850 * 1851 * Functional Description: 1852 * This routine calls the interrupt processing routine for this adapter. It 1853 * disables and reenables adapter interrupts, as appropriate. We can support 1854 * shared interrupts since the incoming dev_id pointer provides our device 1855 * structure context. 1856 * 1857 * Return Codes: 1858 * IRQ_HANDLED - an IRQ was handled. 1859 * IRQ_NONE - no IRQ was handled. 1860 * 1861 * Assumptions: 1862 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC 1863 * on Intel-based systems) is done by the operating system outside this 1864 * routine. 1865 * 1866 * System interrupts are enabled through this call. 1867 * 1868 * Side Effects: 1869 * Interrupts are disabled, then reenabled at the adapter. 
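 *
 *   Registration note (hedged, since the request_irq() call itself lives in
 *   the open path outside this routine): shared operation assumes the handler
 *   was registered with the shared flag and the net_device pointer as the
 *   cookie, along the lines of
 *
 *       request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name, dev);
 *
 *   which is what allows the dev_id argument below to recover the board
 *   context and lets the handler return IRQ_NONE when the interrupt belongs
 *   to another device sharing the line.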
1870 */ 1871 1872 static irqreturn_t dfx_interrupt(int irq, void *dev_id) 1873 { 1874 struct net_device *dev = dev_id; 1875 DFX_board_t *bp = netdev_priv(dev); 1876 struct device *bdev = bp->bus_dev; 1877 int dfx_bus_pci = dev_is_pci(bdev); 1878 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 1879 int dfx_bus_tc = DFX_BUS_TC(bdev); 1880 1881 /* Service adapter interrupts */ 1882 1883 if (dfx_bus_pci) { 1884 u32 status; 1885 1886 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); 1887 if (!(status & PFI_STATUS_M_PDQ_INT)) 1888 return IRQ_NONE; 1889 1890 spin_lock(&bp->lock); 1891 1892 /* Disable PDQ-PFI interrupts at PFI */ 1893 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 1894 PFI_MODE_M_DMA_ENB); 1895 1896 /* Call interrupt service routine for this adapter */ 1897 dfx_int_common(dev); 1898 1899 /* Clear PDQ interrupt status bit and reenable interrupts */ 1900 dfx_port_write_long(bp, PFI_K_REG_STATUS, 1901 PFI_STATUS_M_PDQ_INT); 1902 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 1903 (PFI_MODE_M_PDQ_INT_ENB | 1904 PFI_MODE_M_DMA_ENB)); 1905 1906 spin_unlock(&bp->lock); 1907 } 1908 if (dfx_bus_eisa) { 1909 unsigned long base_addr = to_eisa_device(bdev)->base_addr; 1910 u8 status; 1911 1912 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); 1913 if (!(status & PI_CONFIG_STAT_0_M_PEND)) 1914 return IRQ_NONE; 1915 1916 spin_lock(&bp->lock); 1917 1918 /* Disable interrupts at the ESIC */ 1919 status &= ~PI_CONFIG_STAT_0_M_INT_ENB; 1920 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); 1921 1922 /* Call interrupt service routine for this adapter */ 1923 dfx_int_common(dev); 1924 1925 /* Reenable interrupts at the ESIC */ 1926 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); 1927 status |= PI_CONFIG_STAT_0_M_INT_ENB; 1928 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); 1929 1930 spin_unlock(&bp->lock); 1931 } 1932 if (dfx_bus_tc) { 1933 u32 status; 1934 1935 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); 1936 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING | 1937 PI_PSTATUS_M_XMT_DATA_PENDING | 1938 PI_PSTATUS_M_SMT_HOST_PENDING | 1939 PI_PSTATUS_M_UNSOL_PENDING | 1940 PI_PSTATUS_M_CMD_RSP_PENDING | 1941 PI_PSTATUS_M_CMD_REQ_PENDING | 1942 PI_PSTATUS_M_TYPE_0_PENDING))) 1943 return IRQ_NONE; 1944 1945 spin_lock(&bp->lock); 1946 1947 /* Call interrupt service routine for this adapter */ 1948 dfx_int_common(dev); 1949 1950 spin_unlock(&bp->lock); 1951 } 1952 1953 return IRQ_HANDLED; 1954 } 1955 1956 1957 /* 1958 * ===================== 1959 * = dfx_ctl_get_stats = 1960 * ===================== 1961 * 1962 * Overview: 1963 * Get statistics for FDDI adapter 1964 * 1965 * Returns: 1966 * Pointer to FDDI statistics structure 1967 * 1968 * Arguments: 1969 * dev - pointer to device information 1970 * 1971 * Functional Description: 1972 * Gets current MIB objects from adapter, then 1973 * returns FDDI statistics structure as defined 1974 * in if_fddi.h. 1975 * 1976 * Note: Since the FDDI statistics structure is 1977 * still new and the device structure doesn't 1978 * have an FDDI-specific get statistics handler, 1979 * we'll return the FDDI statistics structure as 1980 * a pointer to an Ethernet statistics structure. 1981 * That way, at least the first part of the statistics 1982 * structure can be decoded properly, and it allows 1983 * "smart" applications to perform a second cast to 1984 * decode the FDDI-specific statistics. 1985 * 1986 * We'll have to pay attention to this routine as the 1987 * device structure becomes more mature and LAN media 1988 * independent. 
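 *
 *   Illustration of the cast described above (caller-side usage, assuming
 *   the fddi_statistics layout from if_fddi.h): a consumer holding the
 *   returned pointer p can recover the full record with
 *
 *       struct fddi_statistics *fs = (struct fddi_statistics *)p;
 *
 *   because the generic counters (the .gen member filled in below) sit at
 *   the very start of the structure.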
1989 * 1990 * Return Codes: 1991 * None 1992 * 1993 * Assumptions: 1994 * None 1995 * 1996 * Side Effects: 1997 * None 1998 */ 1999 2000 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) 2001 { 2002 DFX_board_t *bp = netdev_priv(dev); 2003 2004 /* Fill the bp->stats structure with driver-maintained counters */ 2005 2006 bp->stats.gen.rx_packets = bp->rcv_total_frames; 2007 bp->stats.gen.tx_packets = bp->xmt_total_frames; 2008 bp->stats.gen.rx_bytes = bp->rcv_total_bytes; 2009 bp->stats.gen.tx_bytes = bp->xmt_total_bytes; 2010 bp->stats.gen.rx_errors = bp->rcv_crc_errors + 2011 bp->rcv_frame_status_errors + 2012 bp->rcv_length_errors; 2013 bp->stats.gen.tx_errors = bp->xmt_length_errors; 2014 bp->stats.gen.rx_dropped = bp->rcv_discards; 2015 bp->stats.gen.tx_dropped = bp->xmt_discards; 2016 bp->stats.gen.multicast = bp->rcv_multicast_frames; 2017 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */ 2018 2019 /* Get FDDI SMT MIB objects */ 2020 2021 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; 2022 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2023 return (struct net_device_stats *)&bp->stats; 2024 2025 /* Fill the bp->stats structure with the SMT MIB object values */ 2026 2027 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); 2028 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; 2029 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; 2030 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; 2031 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); 2032 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; 2033 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; 2034 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; 2035 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; 2036 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; 2037 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; 2038 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; 2039 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; 2040 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; 2041 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; 2042 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; 2043 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; 2044 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; 2045 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; 2046 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; 2047 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; 2048 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; 2049 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; 2050 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; 2051 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; 2052 bp->stats.mac_t_max_capability = 
bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; 2053 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; 2054 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; 2055 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; 2056 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); 2057 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); 2058 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); 2059 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); 2060 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; 2061 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; 2062 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; 2063 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); 2064 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; 2065 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; 2066 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; 2067 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; 2068 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; 2069 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; 2070 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; 2071 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; 2072 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; 2073 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; 2074 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; 2075 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; 2076 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; 2077 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; 2078 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; 2079 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; 2080 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); 2081 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; 2082 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; 2083 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; 2084 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; 2085 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; 2086 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; 2087 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; 2088 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; 2089 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; 2090 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; 2091 
memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); 2092 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); 2093 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; 2094 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; 2095 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; 2096 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; 2097 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; 2098 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; 2099 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; 2100 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; 2101 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; 2102 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; 2103 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; 2104 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; 2105 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; 2106 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; 2107 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; 2108 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; 2109 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; 2110 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; 2111 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; 2112 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; 2113 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; 2114 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; 2115 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; 2116 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; 2117 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; 2118 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; 2119 2120 /* Get FDDI counters */ 2121 2122 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; 2123 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2124 return (struct net_device_stats *)&bp->stats; 2125 2126 /* Fill the bp->stats structure with the FDDI counter values */ 2127 2128 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; 2129 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; 2130 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; 2131 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; 2132 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; 2133 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; 2134 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; 2135 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; 2136 bp->stats.port_lem_reject_cts[1] = 
bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; 2137 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; 2138 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; 2139 2140 return (struct net_device_stats *)&bp->stats; 2141 } 2142 2143 2144 /* 2145 * ============================== 2146 * = dfx_ctl_set_multicast_list = 2147 * ============================== 2148 * 2149 * Overview: 2150 * Enable/Disable LLC frame promiscuous mode reception 2151 * on the adapter and/or update multicast address table. 2152 * 2153 * Returns: 2154 * None 2155 * 2156 * Arguments: 2157 * dev - pointer to device information 2158 * 2159 * Functional Description: 2160 * This routine follows a fairly simple algorithm for setting the 2161 * adapter filters and CAM: 2162 * 2163 * if IFF_PROMISC flag is set 2164 * enable LLC individual/group promiscuous mode 2165 * else 2166 * disable LLC individual/group promiscuous mode 2167 * if number of incoming multicast addresses > 2168 * (CAM max size - number of unicast addresses in CAM) 2169 * enable LLC group promiscuous mode 2170 * set driver-maintained multicast address count to zero 2171 * else 2172 * disable LLC group promiscuous mode 2173 * set driver-maintained multicast address count to incoming count 2174 * update adapter CAM 2175 * update adapter filters 2176 * 2177 * Return Codes: 2178 * None 2179 * 2180 * Assumptions: 2181 * Multicast addresses are presented in canonical (LSB) format. 2182 * 2183 * Side Effects: 2184 * On-board adapter CAM and filters are updated. 2185 */ 2186 2187 static void dfx_ctl_set_multicast_list(struct net_device *dev) 2188 { 2189 DFX_board_t *bp = netdev_priv(dev); 2190 int i; /* used as index in for loop */ 2191 struct netdev_hw_addr *ha; 2192 2193 /* Enable LLC frame promiscuous mode, if necessary */ 2194 2195 if (dev->flags & IFF_PROMISC) 2196 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */ 2197 2198 /* Else, update multicast address table */ 2199 2200 else 2201 { 2202 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */ 2203 /* 2204 * Check whether incoming multicast address count exceeds table size 2205 * 2206 * Note: The adapters utilize an on-board 64 entry CAM for 2207 * supporting perfect filtering of multicast packets 2208 * and bridge functions when adding unicast addresses. 2209 * There is no hash function available. To support 2210 * additional multicast addresses, the all multicast 2211 * filter (LLC group promiscuous mode) must be enabled. 2212 * 2213 * The firmware reserves two CAM entries for SMT-related 2214 * multicast addresses, which leaves 62 entries available. 2215 * The following code ensures that we're not being asked 2216 * to add more than 62 addresses to the CAM. If we are, 2217 * the driver will enable the all multicast filter. 2218 * Should the number of multicast addresses drop below 2219 * the high water mark, the filter will be disabled and 2220 * perfect filtering will be used. 
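 *
 *      Worked example of the check coded below, taking
 *      PI_CMD_ADDR_FILTER_K_SIZE as the 62 usable entries described above:
 *      with one unicast override in the CAM (uc_count == 1), up to 61
 *      multicast addresses can still be perfectly filtered; a 62nd makes
 *      netdev_mc_count(dev) exceed (PI_CMD_ADDR_FILTER_K_SIZE - uc_count),
 *      so the driver falls back to the LLC group promiscuous
 *      ("all multicast") filter instead.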
2221 */ 2222 2223 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)) 2224 { 2225 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ 2226 bp->mc_count = 0; /* Don't add mc addrs to CAM */ 2227 } 2228 else 2229 { 2230 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */ 2231 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */ 2232 } 2233 2234 /* Copy addresses to multicast address table, then update adapter CAM */ 2235 2236 i = 0; 2237 netdev_for_each_mc_addr(ha, dev) 2238 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], 2239 ha->addr, FDDI_K_ALEN); 2240 2241 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2242 { 2243 DBG_printk("%s: Could not update multicast address table!\n", dev->name); 2244 } 2245 else 2246 { 2247 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count); 2248 } 2249 } 2250 2251 /* Update adapter filters */ 2252 2253 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 2254 { 2255 DBG_printk("%s: Could not update adapter filters!\n", dev->name); 2256 } 2257 else 2258 { 2259 DBG_printk("%s: Adapter filters updated!\n", dev->name); 2260 } 2261 } 2262 2263 2264 /* 2265 * =========================== 2266 * = dfx_ctl_set_mac_address = 2267 * =========================== 2268 * 2269 * Overview: 2270 * Add node address override (unicast address) to adapter 2271 * CAM and update dev_addr field in device table. 2272 * 2273 * Returns: 2274 * None 2275 * 2276 * Arguments: 2277 * dev - pointer to device information 2278 * addr - pointer to sockaddr structure containing unicast address to add 2279 * 2280 * Functional Description: 2281 * The adapter supports node address overrides by adding one or more 2282 * unicast addresses to the adapter CAM. This is similar to adding 2283 * multicast addresses. In this routine we'll update the driver and 2284 * device structures with the new address, then update the adapter CAM 2285 * to ensure that the adapter will copy and strip frames destined and 2286 * sourced by that address. 2287 * 2288 * Return Codes: 2289 * Always returns zero. 2290 * 2291 * Assumptions: 2292 * The address pointed to by addr->sa_data is a valid unicast 2293 * address and is presented in canonical (LSB) format. 2294 * 2295 * Side Effects: 2296 * On-board adapter CAM is updated. On-board adapter filters 2297 * may be updated. 2298 */ 2299 2300 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) 2301 { 2302 struct sockaddr *p_sockaddr = (struct sockaddr *)addr; 2303 DFX_board_t *bp = netdev_priv(dev); 2304 2305 /* Copy unicast address to driver-maintained structs and update count */ 2306 2307 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */ 2308 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */ 2309 bp->uc_count = 1; 2310 2311 /* 2312 * Verify we're not exceeding the CAM size by adding unicast address 2313 * 2314 * Note: It's possible that before entering this routine we've 2315 * already filled the CAM with 62 multicast addresses. 2316 * Since we need to place the node address override into 2317 * the CAM, we have to check to see that we're not 2318 * exceeding the CAM size. If we are, we have to enable 2319 * the LLC group (multicast) promiscuous mode filter as 2320 * in dfx_ctl_set_multicast_list. 
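 *
 *   Side note (an assumption, since the hookup is outside this excerpt): the
 *   (dev, addr) signature with addr pointing at a struct sockaddr matches the
 *   kernel's standard set-MAC-address entry point, so this routine is
 *   normally reached through the usual SIOCSIFHWADDR ("change hardware
 *   address") path rather than being called directly.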
2321 */ 2322 2323 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE) 2324 { 2325 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ 2326 bp->mc_count = 0; /* Don't add mc addrs to CAM */ 2327 2328 /* Update adapter filters */ 2329 2330 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 2331 { 2332 DBG_printk("%s: Could not update adapter filters!\n", dev->name); 2333 } 2334 else 2335 { 2336 DBG_printk("%s: Adapter filters updated!\n", dev->name); 2337 } 2338 } 2339 2340 /* Update adapter CAM with new unicast address */ 2341 2342 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2343 { 2344 DBG_printk("%s: Could not set new MAC address!\n", dev->name); 2345 } 2346 else 2347 { 2348 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name); 2349 } 2350 return 0; /* always return zero */ 2351 } 2352 2353 2354 /* 2355 * ====================== 2356 * = dfx_ctl_update_cam = 2357 * ====================== 2358 * 2359 * Overview: 2360 * Procedure to update adapter CAM (Content Addressable Memory) 2361 * with desired unicast and multicast address entries. 2362 * 2363 * Returns: 2364 * Condition code 2365 * 2366 * Arguments: 2367 * bp - pointer to board information 2368 * 2369 * Functional Description: 2370 * Updates adapter CAM with current contents of board structure 2371 * unicast and multicast address tables. Since there are only 62 2372 * free entries in CAM, this routine ensures that the command 2373 * request buffer is not overrun. 2374 * 2375 * Return Codes: 2376 * DFX_K_SUCCESS - Request succeeded 2377 * DFX_K_FAILURE - Request failed 2378 * 2379 * Assumptions: 2380 * All addresses being added (unicast and multicast) are in canonical 2381 * order. 2382 * 2383 * Side Effects: 2384 * On-board adapter CAM is updated. 2385 */ 2386 2387 static int dfx_ctl_update_cam(DFX_board_t *bp) 2388 { 2389 int i; /* used as index */ 2390 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */ 2391 2392 /* 2393 * Fill in command request information 2394 * 2395 * Note: Even though both the unicast and multicast address 2396 * table entries are stored as contiguous 6 byte entries, 2397 * the firmware address filter set command expects each 2398 * entry to be two longwords (8 bytes total). We must be 2399 * careful to only copy the six bytes of each unicast and 2400 * multicast table entry into each command entry. This 2401 * is also why we must first clear the entire command 2402 * request buffer. 
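 *
 *      Illustrative layout of one command entry as described above:
 *
 *          bytes 0-5 : 6-byte LAN address copied from uc_table/mc_table
 *          bytes 6-7 : padding, left zero by the memset() of the request
 *                      buffer below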
2403 */ 2404 2405 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */ 2406 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET; 2407 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0]; 2408 2409 /* Now add unicast addresses to command request buffer, if any */ 2410 2411 for (i=0; i < (int)bp->uc_count; i++) 2412 { 2413 if (i < PI_CMD_ADDR_FILTER_K_SIZE) 2414 { 2415 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); 2416 p_addr++; /* point to next command entry */ 2417 } 2418 } 2419 2420 /* Now add multicast addresses to command request buffer, if any */ 2421 2422 for (i=0; i < (int)bp->mc_count; i++) 2423 { 2424 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE) 2425 { 2426 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); 2427 p_addr++; /* point to next command entry */ 2428 } 2429 } 2430 2431 /* Issue command to update adapter CAM, then return */ 2432 2433 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2434 return DFX_K_FAILURE; 2435 return DFX_K_SUCCESS; 2436 } 2437 2438 2439 /* 2440 * ========================== 2441 * = dfx_ctl_update_filters = 2442 * ========================== 2443 * 2444 * Overview: 2445 * Procedure to update adapter filters with desired 2446 * filter settings. 2447 * 2448 * Returns: 2449 * Condition code 2450 * 2451 * Arguments: 2452 * bp - pointer to board information 2453 * 2454 * Functional Description: 2455 * Enables or disables filter using current filter settings. 2456 * 2457 * Return Codes: 2458 * DFX_K_SUCCESS - Request succeeded. 2459 * DFX_K_FAILURE - Request failed. 2460 * 2461 * Assumptions: 2462 * We must always pass up packets destined to the broadcast 2463 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the 2464 * broadcast filter enabled. 2465 * 2466 * Side Effects: 2467 * On-board adapter filters are updated. 2468 */ 2469 2470 static int dfx_ctl_update_filters(DFX_board_t *bp) 2471 { 2472 int i = 0; /* used as index */ 2473 2474 /* Fill in command request information */ 2475 2476 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET; 2477 2478 /* Initialize Broadcast filter - * ALWAYS ENABLED * */ 2479 2480 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST; 2481 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS; 2482 2483 /* Initialize LLC Individual/Group Promiscuous filter */ 2484 2485 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM; 2486 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom; 2487 2488 /* Initialize LLC Group Promiscuous filter */ 2489 2490 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM; 2491 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom; 2492 2493 /* Terminate the item code list */ 2494 2495 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL; 2496 2497 /* Issue command to update adapter filters, then return */ 2498 2499 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2500 return DFX_K_FAILURE; 2501 return DFX_K_SUCCESS; 2502 } 2503 2504 2505 /* 2506 * ====================== 2507 * = dfx_hw_dma_cmd_req = 2508 * ====================== 2509 * 2510 * Overview: 2511 * Sends PDQ DMA command to adapter firmware 2512 * 2513 * Returns: 2514 * Condition code 2515 * 2516 * Arguments: 2517 * bp - pointer to board information 2518 * 2519 * Functional Description: 2520 * The command request and response buffers are posted to the adapter in the manner 2521 * described in the PDQ Port Specification: 2522 * 2523 * 1. Command Response Buffer is posted to adapter. 2524 * 2. 
Command Request Buffer is posted to adapter. 2525 * 3. Command Request consumer index is polled until it indicates that request 2526 * buffer has been DMA'd to adapter. 2527 * 4. Command Response consumer index is polled until it indicates that response 2528 * buffer has been DMA'd from adapter. 2529 * 2530 * This ordering ensures that a response buffer is already available for the firmware 2531 * to use once it's done processing the request buffer. 2532 * 2533 * Return Codes: 2534 * DFX_K_SUCCESS - DMA command succeeded 2535 * DFX_K_OUTSTATE - Adapter is NOT in proper state 2536 * DFX_K_HW_TIMEOUT - DMA command timed out 2537 * 2538 * Assumptions: 2539 * Command request buffer has already been filled with desired DMA command. 2540 * 2541 * Side Effects: 2542 * None 2543 */ 2544 2545 static int dfx_hw_dma_cmd_req(DFX_board_t *bp) 2546 { 2547 int status; /* adapter status */ 2548 int timeout_cnt; /* used in for loops */ 2549 2550 /* Make sure the adapter is in a state that we can issue the DMA command in */ 2551 2552 status = dfx_hw_adap_state_rd(bp); 2553 if ((status == PI_STATE_K_RESET) || 2554 (status == PI_STATE_K_HALTED) || 2555 (status == PI_STATE_K_DMA_UNAVAIL) || 2556 (status == PI_STATE_K_UPGRADE)) 2557 return DFX_K_OUTSTATE; 2558 2559 /* Put response buffer on the command response queue */ 2560 2561 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2562 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2563 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys; 2564 2565 /* Bump (and wrap) the producer index and write out to register */ 2566 2567 bp->cmd_rsp_reg.index.prod += 1; 2568 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1; 2569 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); 2570 2571 /* Put request buffer on the command request queue */ 2572 2573 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP | 2574 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN)); 2575 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys; 2576 2577 /* Bump (and wrap) the producer index and write out to register */ 2578 2579 bp->cmd_req_reg.index.prod += 1; 2580 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1; 2581 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); 2582 2583 /* 2584 * Here we wait for the command request consumer index to be equal 2585 * to the producer, indicating that the adapter has DMAed the request. 2586 */ 2587 2588 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--) 2589 { 2590 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req)) 2591 break; 2592 udelay(100); /* wait for 100 microseconds */ 2593 } 2594 if (timeout_cnt == 0) 2595 return DFX_K_HW_TIMEOUT; 2596 2597 /* Bump (and wrap) the completion index and write out to register */ 2598 2599 bp->cmd_req_reg.index.comp += 1; 2600 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1; 2601 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); 2602 2603 /* 2604 * Here we wait for the command response consumer index to be equal 2605 * to the producer, indicating that the adapter has DMAed the response. 
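	 *
	 * As with the request poll above, the loop below allows roughly
	 * 20000 * 100 microseconds, i.e. about 2 seconds, before giving up
	 * and returning DFX_K_HW_TIMEOUT.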
2606 */ 2607 2608 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--) 2609 { 2610 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp)) 2611 break; 2612 udelay(100); /* wait for 100 microseconds */ 2613 } 2614 if (timeout_cnt == 0) 2615 return DFX_K_HW_TIMEOUT; 2616 2617 /* Bump (and wrap) the completion index and write out to register */ 2618 2619 bp->cmd_rsp_reg.index.comp += 1; 2620 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1; 2621 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); 2622 return DFX_K_SUCCESS; 2623 } 2624 2625 2626 /* 2627 * ======================== 2628 * = dfx_hw_port_ctrl_req = 2629 * ======================== 2630 * 2631 * Overview: 2632 * Sends PDQ port control command to adapter firmware 2633 * 2634 * Returns: 2635 * Host data register value in host_data if ptr is not NULL 2636 * 2637 * Arguments: 2638 * bp - pointer to board information 2639 * command - port control command 2640 * data_a - port data A register value 2641 * data_b - port data B register value 2642 * host_data - ptr to host data register value 2643 * 2644 * Functional Description: 2645 * Send generic port control command to adapter by writing 2646 * to various PDQ port registers, then polling for completion. 2647 * 2648 * Return Codes: 2649 * DFX_K_SUCCESS - port control command succeeded 2650 * DFX_K_HW_TIMEOUT - port control command timed out 2651 * 2652 * Assumptions: 2653 * None 2654 * 2655 * Side Effects: 2656 * None 2657 */ 2658 2659 static int dfx_hw_port_ctrl_req( 2660 DFX_board_t *bp, 2661 PI_UINT32 command, 2662 PI_UINT32 data_a, 2663 PI_UINT32 data_b, 2664 PI_UINT32 *host_data 2665 ) 2666 2667 { 2668 PI_UINT32 port_cmd; /* Port Control command register value */ 2669 int timeout_cnt; /* used in for loops */ 2670 2671 /* Set Command Error bit in command longword */ 2672 2673 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR); 2674 2675 /* Issue port command to the adapter */ 2676 2677 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a); 2678 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b); 2679 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd); 2680 2681 /* Now wait for command to complete */ 2682 2683 if (command == PI_PCTRL_M_BLAST_FLASH) 2684 timeout_cnt = 600000; /* set command timeout count to 60 seconds */ 2685 else 2686 timeout_cnt = 20000; /* set command timeout count to 2 seconds */ 2687 2688 for (; timeout_cnt > 0; timeout_cnt--) 2689 { 2690 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd); 2691 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR)) 2692 break; 2693 udelay(100); /* wait for 100 microseconds */ 2694 } 2695 if (timeout_cnt == 0) 2696 return DFX_K_HW_TIMEOUT; 2697 2698 /* 2699 * If the address of host_data is non-zero, assume caller has supplied a 2700 * non NULL pointer, and return the contents of the HOST_DATA register in 2701 * it. 2702 */ 2703 2704 if (host_data != NULL) 2705 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data); 2706 return DFX_K_SUCCESS; 2707 } 2708 2709 2710 /* 2711 * ===================== 2712 * = dfx_hw_adap_reset = 2713 * ===================== 2714 * 2715 * Overview: 2716 * Resets adapter 2717 * 2718 * Returns: 2719 * None 2720 * 2721 * Arguments: 2722 * bp - pointer to board information 2723 * type - type of reset to perform 2724 * 2725 * Functional Description: 2726 * Issue soft reset to adapter by writing to PDQ Port Reset 2727 * register. Use incoming reset type to tell adapter what 2728 * kind of reset operation to perform. 
2729 * 2730 * Return Codes: 2731 * None 2732 * 2733 * Assumptions: 2734 * This routine merely issues a soft reset to the adapter. 2735 * It is expected that after this routine returns, the caller 2736 * will appropriately poll the Port Status register for the 2737 * adapter to enter the proper state. 2738 * 2739 * Side Effects: 2740 * Internal adapter registers are cleared. 2741 */ 2742 2743 static void dfx_hw_adap_reset( 2744 DFX_board_t *bp, 2745 PI_UINT32 type 2746 ) 2747 2748 { 2749 /* Set Reset type and assert reset */ 2750 2751 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */ 2752 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET); 2753 2754 /* Wait for at least 1 Microsecond according to the spec. We wait 20 just to be safe */ 2755 2756 udelay(20); 2757 2758 /* Deassert reset */ 2759 2760 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0); 2761 } 2762 2763 2764 /* 2765 * ======================== 2766 * = dfx_hw_adap_state_rd = 2767 * ======================== 2768 * 2769 * Overview: 2770 * Returns current adapter state 2771 * 2772 * Returns: 2773 * Adapter state per PDQ Port Specification 2774 * 2775 * Arguments: 2776 * bp - pointer to board information 2777 * 2778 * Functional Description: 2779 * Reads PDQ Port Status register and returns adapter state. 2780 * 2781 * Return Codes: 2782 * None 2783 * 2784 * Assumptions: 2785 * None 2786 * 2787 * Side Effects: 2788 * None 2789 */ 2790 2791 static int dfx_hw_adap_state_rd(DFX_board_t *bp) 2792 { 2793 PI_UINT32 port_status; /* Port Status register value */ 2794 2795 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 2796 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE; 2797 } 2798 2799 2800 /* 2801 * ===================== 2802 * = dfx_hw_dma_uninit = 2803 * ===================== 2804 * 2805 * Overview: 2806 * Brings adapter to DMA_UNAVAILABLE state 2807 * 2808 * Returns: 2809 * Condition code 2810 * 2811 * Arguments: 2812 * bp - pointer to board information 2813 * type - type of reset to perform 2814 * 2815 * Functional Description: 2816 * Bring adapter to DMA_UNAVAILABLE state by performing the following: 2817 * 1. Set reset type bit in Port Data A Register then reset adapter. 2818 * 2. Check that adapter is in DMA_UNAVAILABLE state. 2819 * 2820 * Return Codes: 2821 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state 2822 * DFX_K_HW_TIMEOUT - adapter did not reset properly 2823 * 2824 * Assumptions: 2825 * None 2826 * 2827 * Side Effects: 2828 * Internal adapter registers are cleared. 
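 *
 *   Timing note: the poll coded below allows up to 100000 * 100
 *   microseconds, i.e. roughly 10 seconds, for the adapter to report
 *   DMA_UNAVAILABLE before DFX_K_HW_TIMEOUT is returned.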
2829 */ 2830 2831 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) 2832 { 2833 int timeout_cnt; /* used in for loops */ 2834 2835 /* Set reset type bit and reset adapter */ 2836 2837 dfx_hw_adap_reset(bp, type); 2838 2839 /* Now wait for adapter to enter DMA_UNAVAILABLE state */ 2840 2841 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--) 2842 { 2843 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL) 2844 break; 2845 udelay(100); /* wait for 100 microseconds */ 2846 } 2847 if (timeout_cnt == 0) 2848 return DFX_K_HW_TIMEOUT; 2849 return DFX_K_SUCCESS; 2850 } 2851 2852 /* 2853 * Align an sk_buff to a boundary power of 2 2854 * 2855 */ 2856 #ifdef DYNAMIC_BUFFERS 2857 static void my_skb_align(struct sk_buff *skb, int n) 2858 { 2859 unsigned long x = (unsigned long)skb->data; 2860 unsigned long v; 2861 2862 v = ALIGN(x, n); /* Where we want to be */ 2863 2864 skb_reserve(skb, v - x); 2865 } 2866 #endif 2867 2868 /* 2869 * ================ 2870 * = dfx_rcv_init = 2871 * ================ 2872 * 2873 * Overview: 2874 * Produces buffers to adapter LLC Host receive descriptor block 2875 * 2876 * Returns: 2877 * None 2878 * 2879 * Arguments: 2880 * bp - pointer to board information 2881 * get_buffers - non-zero if buffers to be allocated 2882 * 2883 * Functional Description: 2884 * This routine can be called during dfx_adap_init() or during an adapter 2885 * reset. It initializes the descriptor block and produces all allocated 2886 * LLC Host queue receive buffers. 2887 * 2888 * Return Codes: 2889 * Return 0 on success or -ENOMEM if buffer allocation failed (when using 2890 * dynamic buffer allocation). If the buffer allocation failed, the 2891 * already allocated buffers will not be released and the caller should do 2892 * this. 2893 * 2894 * Assumptions: 2895 * The PDQ has been reset and the adapter and driver maintained Type 2 2896 * register indices are cleared. 2897 * 2898 * Side Effects: 2899 * Receive buffers are posted to the adapter LLC queue and the adapter 2900 * is notified. 2901 */ 2902 2903 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers) 2904 { 2905 int i, j; /* used in for loop */ 2906 2907 /* 2908 * Since each receive buffer is a single fragment of same length, initialize 2909 * first longword in each receive descriptor for entire LLC Host descriptor 2910 * block. Also initialize second longword in each receive descriptor with 2911 * physical address of receive buffer. We'll always allocate receive 2912 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor 2913 * block and produce new receive buffers by simply updating the receive 2914 * producer index. 2915 * 2916 * Assumptions: 2917 * To support all shipping versions of PDQ, the receive buffer size 2918 * must be mod 128 in length and the physical address must be 128 byte 2919 * aligned. In other words, bits 0-6 of the length and address must 2920 * be zero for the following descriptor field entries to be correct on 2921 * all PDQ-based boards. We guaranteed both requirements during 2922 * driver initialization when we allocated memory for the receive buffers. 
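 *
 *	 Worked example of the descriptor encoding used below: the segment
 *	 length field is expressed in units of PI_ALIGN_K_RCV_DATA_BUFF (the
 *	 128-byte granularity discussed above), so, assuming the usual
 *	 PI_RCV_DATA_K_SIZE_MAX of 4608 bytes (36 * 128), long_0 becomes
 *	 PI_RCV_DESCR_M_SOP | (36 << PI_RCV_DESCR_V_SEG_LEN), which is only
 *	 well defined because bits 0-6 of the length are zero as required.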
2923 */ 2924 2925 if (get_buffers) { 2926 #ifdef DYNAMIC_BUFFERS 2927 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) 2928 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 2929 { 2930 struct sk_buff *newskb; 2931 dma_addr_t dma_addr; 2932 2933 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, 2934 GFP_NOIO); 2935 if (!newskb) 2936 return -ENOMEM; 2937 /* 2938 * align to 128 bytes for compatibility with 2939 * the old EISA boards. 2940 */ 2941 2942 my_skb_align(newskb, 128); 2943 dma_addr = dma_map_single(bp->bus_dev, 2944 newskb->data, 2945 PI_RCV_DATA_K_SIZE_MAX, 2946 DMA_FROM_DEVICE); 2947 if (dma_mapping_error(bp->bus_dev, dma_addr)) { 2948 dev_kfree_skb(newskb); 2949 return -ENOMEM; 2950 } 2951 bp->descr_block_virt->rcv_data[i + j].long_0 = 2952 (u32)(PI_RCV_DESCR_M_SOP | 2953 ((PI_RCV_DATA_K_SIZE_MAX / 2954 PI_ALIGN_K_RCV_DATA_BUFF) << 2955 PI_RCV_DESCR_V_SEG_LEN)); 2956 bp->descr_block_virt->rcv_data[i + j].long_1 = 2957 (u32)dma_addr; 2958 2959 /* 2960 * p_rcv_buff_va is only used inside the 2961 * kernel so we put the skb pointer here. 2962 */ 2963 bp->p_rcv_buff_va[i+j] = (char *) newskb; 2964 } 2965 #else 2966 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++) 2967 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 2968 { 2969 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2970 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2971 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); 2972 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); 2973 } 2974 #endif 2975 } 2976 2977 /* Update receive producer and Type 2 register */ 2978 2979 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post; 2980 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 2981 return 0; 2982 } 2983 2984 2985 /* 2986 * ========================= 2987 * = dfx_rcv_queue_process = 2988 * ========================= 2989 * 2990 * Overview: 2991 * Process received LLC frames. 2992 * 2993 * Returns: 2994 * None 2995 * 2996 * Arguments: 2997 * bp - pointer to board information 2998 * 2999 * Functional Description: 3000 * Received LLC frames are processed until there are no more consumed frames. 3001 * Once all frames are processed, the receive buffers are returned to the 3002 * adapter. Note that this algorithm fixes the length of time that can be spent 3003 * in this routine, because there are a fixed number of receive buffers to 3004 * process and buffers are not produced until this routine exits and returns 3005 * to the ISR. 
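 *
 *   Buffer handling note (describing the DYNAMIC_BUFFERS path coded below):
 *   frames no larger than SKBUFF_RX_COPYBREAK, and any frame for which a
 *   replacement receive buffer cannot be allocated and mapped, are copied
 *   into a freshly allocated sk_buff while the original buffer stays posted;
 *   larger frames are handed up in the original buffer and a newly mapped
 *   replacement is posted in its descriptor slot. Only if no sk_buff can be
 *   obtained at all is the frame dropped and rcv_discards incremented.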
3006 * 3007 * Return Codes: 3008 * None 3009 * 3010 * Assumptions: 3011 * None 3012 * 3013 * Side Effects: 3014 * None 3015 */ 3016 3017 static void dfx_rcv_queue_process( 3018 DFX_board_t *bp 3019 ) 3020 3021 { 3022 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */ 3023 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */ 3024 u32 descr, pkt_len; /* FMC descriptor field and packet length */ 3025 struct sk_buff *skb = NULL; /* pointer to a sk_buff to hold incoming packet data */ 3026 3027 /* Service all consumed LLC receive frames */ 3028 3029 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); 3030 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons) 3031 { 3032 /* Process any errors */ 3033 dma_addr_t dma_addr; 3034 int entry; 3035 3036 entry = bp->rcv_xmt_reg.index.rcv_comp; 3037 #ifdef DYNAMIC_BUFFERS 3038 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); 3039 #else 3040 p_buff = bp->p_rcv_buff_va[entry]; 3041 #endif 3042 dma_addr = bp->descr_block_virt->rcv_data[entry].long_1; 3043 dma_sync_single_for_cpu(bp->bus_dev, 3044 dma_addr + RCV_BUFF_K_DESCR, 3045 sizeof(u32), 3046 DMA_FROM_DEVICE); 3047 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32)); 3048 3049 if (descr & PI_FMC_DESCR_M_RCC_FLUSH) 3050 { 3051 if (descr & PI_FMC_DESCR_M_RCC_CRC) 3052 bp->rcv_crc_errors++; 3053 else 3054 bp->rcv_frame_status_errors++; 3055 } 3056 else 3057 { 3058 int rx_in_place = 0; 3059 3060 /* The frame was received without errors - verify packet length */ 3061 3062 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN); 3063 pkt_len -= 4; /* subtract 4 byte CRC */ 3064 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN)) 3065 bp->rcv_length_errors++; 3066 else{ 3067 #ifdef DYNAMIC_BUFFERS 3068 struct sk_buff *newskb = NULL; 3069 3070 if (pkt_len > SKBUFF_RX_COPYBREAK) { 3071 dma_addr_t new_dma_addr; 3072 3073 newskb = netdev_alloc_skb(bp->dev, 3074 NEW_SKB_SIZE); 3075 if (newskb){ 3076 my_skb_align(newskb, 128); 3077 new_dma_addr = dma_map_single( 3078 bp->bus_dev, 3079 newskb->data, 3080 PI_RCV_DATA_K_SIZE_MAX, 3081 DMA_FROM_DEVICE); 3082 if (dma_mapping_error( 3083 bp->bus_dev, 3084 new_dma_addr)) { 3085 dev_kfree_skb(newskb); 3086 newskb = NULL; 3087 } 3088 } 3089 if (newskb) { 3090 rx_in_place = 1; 3091 3092 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; 3093 dma_unmap_single(bp->bus_dev, 3094 dma_addr, 3095 PI_RCV_DATA_K_SIZE_MAX, 3096 DMA_FROM_DEVICE); 3097 skb_reserve(skb, RCV_BUFF_K_PADDING); 3098 bp->p_rcv_buff_va[entry] = (char *)newskb; 3099 bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr; 3100 } 3101 } 3102 if (!newskb) 3103 #endif 3104 /* Alloc new buffer to pass up, 3105 * add room for PRH. */ 3106 skb = netdev_alloc_skb(bp->dev, 3107 pkt_len + 3); 3108 if (skb == NULL) 3109 { 3110 printk("%s: Could not allocate receive buffer. 
Dropping packet.\n", bp->dev->name); 3111 bp->rcv_discards++; 3112 break; 3113 } 3114 else { 3115 if (!rx_in_place) { 3116 /* Receive buffer allocated, pass receive packet up */ 3117 dma_sync_single_for_cpu( 3118 bp->bus_dev, 3119 dma_addr + 3120 RCV_BUFF_K_PADDING, 3121 pkt_len + 3, 3122 DMA_FROM_DEVICE); 3123 3124 skb_copy_to_linear_data(skb, 3125 p_buff + RCV_BUFF_K_PADDING, 3126 pkt_len + 3); 3127 } 3128 3129 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ 3130 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ 3131 skb->protocol = fddi_type_trans(skb, bp->dev); 3132 bp->rcv_total_bytes += skb->len; 3133 netif_rx(skb); 3134 3135 /* Update the rcv counters */ 3136 bp->rcv_total_frames++; 3137 if (*(p_buff + RCV_BUFF_K_DA) & 0x01) 3138 bp->rcv_multicast_frames++; 3139 } 3140 } 3141 } 3142 3143 /* 3144 * Advance the producer (for recycling) and advance the completion 3145 * (for servicing received frames). Note that it is okay to 3146 * advance the producer without checking that it passes the 3147 * completion index because they are both advanced at the same 3148 * rate. 3149 */ 3150 3151 bp->rcv_xmt_reg.index.rcv_prod += 1; 3152 bp->rcv_xmt_reg.index.rcv_comp += 1; 3153 } 3154 } 3155 3156 3157 /* 3158 * ===================== 3159 * = dfx_xmt_queue_pkt = 3160 * ===================== 3161 * 3162 * Overview: 3163 * Queues packets for transmission 3164 * 3165 * Returns: 3166 * Condition code 3167 * 3168 * Arguments: 3169 * skb - pointer to sk_buff to queue for transmission 3170 * dev - pointer to device information 3171 * 3172 * Functional Description: 3173 * Here we assume that an incoming skb transmit request 3174 * is contained in a single physically contiguous buffer 3175 * in which the virtual address of the start of packet 3176 * (skb->data) can be converted to a physical address 3177 * by using pci_map_single(). 3178 * 3179 * Since the adapter architecture requires a three byte 3180 * packet request header to prepend the start of packet, 3181 * we'll write the three byte field immediately prior to 3182 * the FC byte. This assumption is valid because we've 3183 * ensured that dev->hard_header_len includes three pad 3184 * bytes. By posting a single fragment to the adapter, 3185 * we'll reduce the number of descriptor fetches and 3186 * bus traffic needed to send the request. 3187 * 3188 * Also, we can't free the skb until after it's been DMA'd 3189 * out by the adapter, so we'll queue it in the driver and 3190 * return it in dfx_xmt_done. 3191 * 3192 * Return Codes: 3193 * 0 - driver queued packet, link is unavailable, or skbuff was bad 3194 * 1 - caller should requeue the sk_buff for later transmission 3195 * 3196 * Assumptions: 3197 * First and foremost, we assume the incoming skb pointer 3198 * is NOT NULL and is pointing to a valid sk_buff structure. 3199 * 3200 * The outgoing packet is complete, starting with the 3201 * frame control byte including the last byte of data, 3202 * but NOT including the 4 byte CRC. We'll let the 3203 * adapter hardware generate and append the CRC. 3204 * 3205 * The entire packet is stored in one physically 3206 * contiguous buffer which is not cached and whose 3207 * 32-bit physical address can be determined. 3208 * 3209 * It's vital that this routine is NOT reentered for the 3210 * same board and that the OS is not in another section of 3211 * code (eg. dfx_int_common) for the same board on a 3212 * different thread. 
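 *
 *   In this driver that exclusion comes from the board spinlock: the
 *   queueing code below takes bp->lock (spin_lock_irqsave) around the
 *   descriptor update, and dfx_interrupt() takes the same lock around
 *   dfx_int_common(), so the transmit path and the ISR never touch the
 *   Type 2 producer/completion indices of a board concurrently.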
3213 * 3214 * Side Effects: 3215 * None 3216 */ 3217 3218 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 3219 struct net_device *dev) 3220 { 3221 DFX_board_t *bp = netdev_priv(dev); 3222 u8 prod; /* local transmit producer index */ 3223 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ 3224 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ 3225 dma_addr_t dma_addr; 3226 unsigned long flags; 3227 3228 netif_stop_queue(dev); 3229 3230 /* 3231 * Verify that incoming transmit request is OK 3232 * 3233 * Note: The packet size check is consistent with other 3234 * Linux device drivers, although the correct packet 3235 * size should be verified before calling the 3236 * transmit routine. 3237 */ 3238 3239 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN)) 3240 { 3241 printk("%s: Invalid packet length - %u bytes\n", 3242 dev->name, skb->len); 3243 bp->xmt_length_errors++; /* bump error counter */ 3244 netif_wake_queue(dev); 3245 dev_kfree_skb(skb); 3246 return NETDEV_TX_OK; /* return "success" */ 3247 } 3248 /* 3249 * See if adapter link is available, if not, free buffer 3250 * 3251 * Note: If the link isn't available, free buffer and return 0 3252 * rather than tell the upper layer to requeue the packet. 3253 * The methodology here is that by the time the link 3254 * becomes available, the packet to be sent will be 3255 * fairly stale. By simply dropping the packet, the 3256 * higher layer protocols will eventually time out 3257 * waiting for response packets which it won't receive. 3258 */ 3259 3260 if (bp->link_available == PI_K_FALSE) 3261 { 3262 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */ 3263 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */ 3264 else 3265 { 3266 bp->xmt_discards++; /* bump error counter */ 3267 dev_kfree_skb(skb); /* free sk_buff now */ 3268 netif_wake_queue(dev); 3269 return NETDEV_TX_OK; /* return "success" */ 3270 } 3271 } 3272 3273 /* Write the three PRH bytes immediately before the FC byte */ 3274 3275 skb_push(skb, 3); 3276 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */ 3277 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */ 3278 skb->data[2] = DFX_PRH2_BYTE; /* specification */ 3279 3280 dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len, 3281 DMA_TO_DEVICE); 3282 if (dma_mapping_error(bp->bus_dev, dma_addr)) { 3283 skb_pull(skb, 3); 3284 return NETDEV_TX_BUSY; 3285 } 3286 3287 spin_lock_irqsave(&bp->lock, flags); 3288 3289 /* Get the current producer and the next free xmt data descriptor */ 3290 3291 prod = bp->rcv_xmt_reg.index.xmt_prod; 3292 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]); 3293 3294 /* 3295 * Get pointer to auxiliary queue entry to contain information 3296 * for this packet. 3297 * 3298 * Note: The current xmt producer index will become the 3299 * current xmt completion index when we complete this 3300 * packet later on. So, we'll get the pointer to the 3301 * next auxiliary queue entry now before we bump the 3302 * producer index. 3303 */ 3304 3305 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */ 3306 3307 /* 3308 * Write the descriptor with buffer info and bump producer 3309 * 3310 * Note: Since we need to start DMA from the packet request 3311 * header, we'll add 3 bytes to the DMA buffer length, 3312 * and we'll determine the physical address of the 3313 * buffer from the PRH, not skb->data. 3314 * 3315 * Assumptions: 3316 * 1. 
Packet starts with the frame control (FC) byte 3317 * at skb->data. 3318 * 2. The 4-byte CRC is not appended to the buffer or 3319 * included in the length. 3320 * 3. Packet length (skb->len) is from FC to end of 3321 * data, inclusive. 3322 * 4. The packet length does not exceed the maximum 3323 * FDDI LLC frame length of 4491 bytes. 3324 * 5. The entire packet is contained in a physically 3325 * contiguous, non-cached, locked memory space 3326 * comprised of a single buffer pointed to by 3327 * skb->data. 3328 * 6. The physical address of the start of packet 3329 * can be determined from the virtual address 3330 * by using pci_map_single() and is only 32-bits 3331 * wide. 3332 */ 3333 3334 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); 3335 p_xmt_descr->long_1 = (u32)dma_addr; 3336 3337 /* 3338 * Verify that descriptor is actually available 3339 * 3340 * Note: If descriptor isn't available, return 1 which tells 3341 * the upper layer to requeue the packet for later 3342 * transmission. 3343 * 3344 * We need to ensure that the producer never reaches the 3345 * completion, except to indicate that the queue is empty. 3346 */ 3347 3348 if (prod == bp->rcv_xmt_reg.index.xmt_comp) 3349 { 3350 skb_pull(skb,3); 3351 spin_unlock_irqrestore(&bp->lock, flags); 3352 return NETDEV_TX_BUSY; /* requeue packet for later */ 3353 } 3354 3355 /* 3356 * Save info for this packet for xmt done indication routine 3357 * 3358 * Normally, we'd save the producer index in the p_xmt_drv_descr 3359 * structure so that we'd have it handy when we complete this 3360 * packet later (in dfx_xmt_done). However, since the current 3361 * transmit architecture guarantees a single fragment for the 3362 * entire packet, we can simply bump the completion index by 3363 * one (1) for each completed packet. 3364 * 3365 * Note: If this assumption changes and we're presented with 3366 * an inconsistent number of transmit fragments for packet 3367 * data, we'll need to modify this code to save the current 3368 * transmit producer index. 3369 */ 3370 3371 p_xmt_drv_descr->p_skb = skb; 3372 3373 /* Update Type 2 register */ 3374 3375 bp->rcv_xmt_reg.index.xmt_prod = prod; 3376 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 3377 spin_unlock_irqrestore(&bp->lock, flags); 3378 netif_wake_queue(dev); 3379 return NETDEV_TX_OK; /* packet queued to adapter */ 3380 } 3381 3382 3383 /* 3384 * ================ 3385 * = dfx_xmt_done = 3386 * ================ 3387 * 3388 * Overview: 3389 * Processes all frames that have been transmitted. 3390 * 3391 * Returns: 3392 * None 3393 * 3394 * Arguments: 3395 * bp - pointer to board information 3396 * 3397 * Functional Description: 3398 * For all consumed transmit descriptors that have not 3399 * yet been completed, we'll free the skb we were holding 3400 * onto using dev_kfree_skb and bump the appropriate 3401 * counters. 3402 * 3403 * Return Codes: 3404 * None 3405 * 3406 * Assumptions: 3407 * The Type 2 register is not updated in this routine. It is 3408 * assumed that it will be updated in the ISR when dfx_xmt_done 3409 * returns. 
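 *
 *   Concretely, dfx_int_common() calls this routine (and the receive
 *   service routine) first and then performs a single write of the Type 2
 *   Producer register covering both queues, which is why the register is
 *   deliberately left untouched here.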
 *
 * Side Effects:
 *   None
 */

static int dfx_xmt_done(DFX_board_t *bp)
{
        XMT_DRIVER_DESCR *p_xmt_drv_descr;      /* ptr to transmit driver descriptor */
        PI_TYPE_2_CONSUMER *p_type_2_cons;      /* ptr to rcv/xmt consumer block register */
        u8 comp;                                /* local transmit completion index */
        int freed = 0;                          /* buffers freed */

        /* Service all consumed transmit frames */

        p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
        while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
        {
                /* Get pointer to the transmit driver descriptor block information */

                p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

                /* Increment transmit counters */

                bp->xmt_total_frames++;
                bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;

                /* Return skb to operating system */
                comp = bp->rcv_xmt_reg.index.xmt_comp;
                dma_unmap_single(bp->bus_dev,
                                 bp->descr_block_virt->xmt_data[comp].long_1,
                                 p_xmt_drv_descr->p_skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);

                /*
                 * Move to start of next packet by updating completion index
                 *
                 * Here we assume that a transmit packet request is always
                 * serviced by posting one fragment.  We can therefore
                 * simplify the completion code by incrementing the
                 * completion index by one.  This code will need to be
                 * modified if this assumption changes.  See comments
                 * in dfx_xmt_queue_pkt for more details.
                 */

                bp->rcv_xmt_reg.index.xmt_comp += 1;
                freed++;
        }
        return freed;
}


/*
 * =================
 * = dfx_rcv_flush =
 * =================
 *
 * Overview:
 *   Remove all skb's in the receive ring.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Frees all the dynamically allocated skb's that are
 *   currently attached to the device receive ring.  This
 *   function is typically only used when the device is
 *   initialized or reinitialized.
 *
 * Return Codes:
 *   None
 *
 * Side Effects:
 *   None
 */
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp)
{
        int i, j;

        for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
                for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
                {
                        struct sk_buff *skb;

                        skb = (struct sk_buff *)bp->p_rcv_buff_va[i + j];
                        if (skb) {
                                dma_unmap_single(bp->bus_dev,
                                                 bp->descr_block_virt->rcv_data[i + j].long_1,
                                                 PI_RCV_DATA_K_SIZE_MAX,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                        bp->p_rcv_buff_va[i + j] = NULL;
                }

}
#endif /* DYNAMIC_BUFFERS */

/*
 * =================
 * = dfx_xmt_flush =
 * =================
 *
 * Overview:
 *   Processes all frames whether they've been transmitted
 *   or not.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   For all produced transmit descriptors that have not
 *   yet been completed, we'll free the skb we were holding
 *   onto using dev_kfree_skb and bump the appropriate
 *   counters.  Of course, it's possible that some of these
 *   transmit requests actually did go out, but we won't
 *   make that distinction here.
 *   Finally, we'll update the consumer index to match the producer.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine does NOT update the Type 2 register.  It
 *   is assumed that this routine is being called during a
 *   transmit flush interrupt, or a shutdown or close routine.
 *
 * Side Effects:
 *   None
 */

static void dfx_xmt_flush(DFX_board_t *bp)
{
        u32 prod_cons;                          /* rcv/xmt consumer block longword */
        XMT_DRIVER_DESCR *p_xmt_drv_descr;      /* ptr to transmit driver descriptor */
        u8 comp;                                /* local transmit completion index */

        /* Flush all outstanding transmit frames */

        while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
        {
                /* Get pointer to the transmit driver descriptor block information */

                p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

                /* Return skb to operating system */
                comp = bp->rcv_xmt_reg.index.xmt_comp;
                dma_unmap_single(bp->bus_dev,
                                 bp->descr_block_virt->xmt_data[comp].long_1,
                                 p_xmt_drv_descr->p_skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb(p_xmt_drv_descr->p_skb);

                /* Increment transmit error counter */

                bp->xmt_discards++;

                /*
                 * Move to start of next packet by updating completion index
                 *
                 * Here we assume that a transmit packet request is always
                 * serviced by posting one fragment.  We can therefore
                 * simplify the completion code by incrementing the
                 * completion index by one.  This code will need to be
                 * modified if this assumption changes.  See comments
                 * in dfx_xmt_queue_pkt for more details.
                 */

                bp->rcv_xmt_reg.index.xmt_comp += 1;
        }

        /* Update the transmit consumer index in the consumer block */

        prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
        prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
        bp->cons_block_virt->xmt_rcv_data = prod_cons;
}

/*
 * ==================
 * = dfx_unregister =
 * ==================
 *
 * Overview:
 *   Shuts down an FDDI controller
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bdev - pointer to device information
 *
 * Functional Description:
 *   Undoes the setup performed at registration time: unregisters the
 *   network device, frees the DMA-consistent descriptor memory,
 *   uninitializes the bus-specific state, and releases the I/O or
 *   memory region claimed for the adapter registers.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   It compiles so it should work :-(  (PCI cards do :-)
 *
 * Side Effects:
 *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
 *   freed.
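 *
 * Example:
 *   The size handed to dma_free_coherent() below is recomputed with the
 *   same formula that was used when the descriptor block was allocated,
 *   because the DMA API requires the allocation and the free to agree on
 *   size as well as on both addresses.  A minimal, stand-alone sketch of
 *   that pairing (hypothetical "size" and "dev", not driver code):
 *
 *      void *va;
 *      dma_addr_t pa;
 *      size_t size = 4096;
 *
 *      va = dma_alloc_coherent(dev, size, &pa, GFP_KERNEL);
 *      if (va) {
 *              ...
 *              dma_free_coherent(dev, size, va, pa);
 *      }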
 */
static void dfx_unregister(struct device *bdev)
{
        struct net_device *dev = dev_get_drvdata(bdev);
        DFX_board_t *bp = netdev_priv(dev);
        int dfx_bus_pci = dev_is_pci(bdev);
        int dfx_bus_tc = DFX_BUS_TC(bdev);
        int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
        resource_size_t bar_start = 0;          /* pointer to port */
        resource_size_t bar_len = 0;            /* resource length */
        int alloc_size;                         /* total buffer size used */

        unregister_netdev(dev);

        alloc_size = sizeof(PI_DESCR_BLOCK) +
                     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
                     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
                     sizeof(PI_CONSUMER_BLOCK) +
                     (PI_ALIGN_K_DESC_BLK - 1);
        if (bp->kmalloced)
                dma_free_coherent(bdev, alloc_size,
                                  bp->kmalloced, bp->kmalloced_dma);

        dfx_bus_uninit(dev);

        dfx_get_bars(bdev, &bar_start, &bar_len);
        if (dfx_use_mmio) {
                iounmap(bp->base.mem);
                release_mem_region(bar_start, bar_len);
        } else
                release_region(bar_start, bar_len);

        if (dfx_bus_pci)
                pci_disable_device(to_pci_dev(bdev));

        free_netdev(dev);
}


static int __maybe_unused dfx_dev_register(struct device *);
static int __maybe_unused dfx_dev_unregister(struct device *);

#ifdef CONFIG_PCI
static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
static void dfx_pci_unregister(struct pci_dev *);

static const struct pci_device_id dfx_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
        { }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
        .name           = "defxx",
        .id_table       = dfx_pci_table,
        .probe          = dfx_pci_register,
        .remove         = dfx_pci_unregister,
};

static int dfx_pci_register(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        return dfx_register(&pdev->dev);
}

static void dfx_pci_unregister(struct pci_dev *pdev)
{
        dfx_unregister(&pdev->dev);
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_EISA
static struct eisa_device_id dfx_eisa_table[] = {
        { "DEC3001", DEFEA_PROD_ID_1 },
        { "DEC3002", DEFEA_PROD_ID_2 },
        { "DEC3003", DEFEA_PROD_ID_3 },
        { "DEC3004", DEFEA_PROD_ID_4 },
        { }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

static struct eisa_driver dfx_eisa_driver = {
        .id_table       = dfx_eisa_table,
        .driver         = {
                .name   = "defxx",
                .bus    = &eisa_bus_type,
                .probe  = dfx_dev_register,
                .remove = dfx_dev_unregister,
        },
};
#endif /* CONFIG_EISA */

#ifdef CONFIG_TC
static struct tc_device_id const dfx_tc_table[] = {
        { "DEC     ", "PMAF-FA " },
        { "DEC     ", "PMAF-FD " },
        { "DEC     ", "PMAF-FS " },
        { "DEC     ", "PMAF-FU " },
        { }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

static struct tc_driver dfx_tc_driver = {
        .id_table       = dfx_tc_table,
        .driver         = {
                .name   = "defxx",
                .bus    = &tc_bus_type,
                .probe  = dfx_dev_register,
                .remove = dfx_dev_unregister,
        },
};
#endif /* CONFIG_TC */

static int __maybe_unused dfx_dev_register(struct device *dev)
{
        int status;

        status = dfx_register(dev);
        if (!status)
                get_device(dev);
        return status;
}

static int __maybe_unused dfx_dev_unregister(struct device *dev)
{
        put_device(dev);
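
        /*
         * The put_device() above drops the reference taken in
         * dfx_dev_register().  The device itself is not freed here,
         * because the bus core still holds its own reference while the
         * remove callback runs, so it is still safe to pass "dev" to
         * dfx_unregister() below.
         */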
        dfx_unregister(dev);
        return 0;
}


static int dfx_init(void)
{
        int status;

        status = pci_register_driver(&dfx_pci_driver);
        if (!status)
                status = eisa_driver_register(&dfx_eisa_driver);
        if (!status)
                status = tc_register_driver(&dfx_tc_driver);
        return status;
}

static void dfx_cleanup(void)
{
        tc_unregister_driver(&dfx_tc_driver);
        eisa_driver_unregister(&dfx_eisa_driver);
        pci_unregister_driver(&dfx_pci_driver);
}

module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
                   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");
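
/*
 * Usage sketch (illustrative, not part of the driver): once this module
 * is built and installed, it is typically loaded and an interface brought
 * up from user space along these lines, where the interface name "fddi0"
 * depends on probe order and is shown only as an example:
 *
 *      modprobe defxx
 *      ip link set fddi0 up
 */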