/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *			Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*#define DEBUG*/
/*#define TEST_HITS*/

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/* Transmit/Receive Ring Definitions */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE
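/* With the values above the driver uses 4 transmit and 16 receive buffers.
 * PKT_BUF_SIZE (1544 bytes) is large enough for a maximum-size Ethernet
 * frame (1518 bytes including the FCS).  Descriptors and buffers are not
 * allocated from system memory; they live in the RAM on the A2065 board
 * itself, laid out by struct lance_init_block below.
 */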
/* Layout of the Lance's RAM Buffer */

struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/* Private Device Data */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	    /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* Lance's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;		/* cable-selection is TPE */
	int auto_select;	/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
};

#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr = LANCE_ADDR(aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}
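/* Note on the CSR setup above: LANCE_ADDR() masks off the CPU-side address
 * bits the chip cannot see, CSR1 and CSR2 receive the low and high halves
 * of the resulting init block address, and CSR3 is loaded with LE_C3_BSWP
 * so the chip byte-swaps its DMA accesses to match the big-endian 68k.
 */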
/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib = lp->lance_init_block;
					/* for LANCE_ADDR computations */
	int leptr;
	int i;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];

	/* Setup the Tx ring entries */
	netdev_dbg(dev, "TX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0 = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	netdev_dbg(dev, "RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0 = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	netdev_dbg(dev, "RX ptr: %08x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	netdev_dbg(dev, "TX ptr: %08x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
}

static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}
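/* Receive path: the LANCE owns a descriptor while LE_R1_OWN is set and
 * hands it back with the bit cleared once a frame (or an error) has been
 * placed in the corresponding on-board buffer.  lance_rx() walks the ring
 * from rx_new, copies each good frame into a freshly allocated skb, passes
 * it to the stack with netif_rx() and gives the descriptor back to the
 * chip by setting LE_R1_OWN again.
 */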
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	char buf[RX_RING_SIZE + 1];

	for (i = 0; i < RX_RING_SIZE; i++) {
		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
		if (i == lp->rx_new)
			buf[i] = r1_own ? '_' : 'X';
		else
			buf[i] = r1_own ? '.' : '1';
	}
	buf[RX_RING_SIZE] = 0;

	pr_debug("RxRing TestHits: [%s]\n", buf);
#endif

	ll->rdp = LE_C0_RINT | LE_C0_INEA;
	for (rd = &ib->brx_ring[lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);		/* 16 byte align */
			skb_put(skb, len);		/* make room */
			skb_copy_to_linear_data(skb,
					(unsigned char *)&ib->rx_buf[lp->rx_new][0],
					len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
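/* lance_tx_buffs_avail() counts the free slots between the reclaim pointer
 * (tx_old) and the insert pointer (tx_new).  One slot is always left
 * unused, so a completely full ring can be told apart from an empty one.
 */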
static int lance_tx_buffs_avail(struct lance_private *lp)
{
	if (lp->tx_old <= lp->tx_new)
		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
	return lp->tx_old - lp->tx_new - 1;
}

static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;	/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;	/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;	/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}

static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}

static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}
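/* Transmit path: the frame is copied into the on-board buffer belonging to
 * the next free descriptor, the descriptor is handed to the chip by setting
 * LE_T1_OWN (plus LE_T1_POK, i.e. start- and end-of-packet, since a frame
 * always fits in a single buffer here), and writing LE_C0_TDMD to CSR0 asks
 * the LANCE to start transmitting immediately rather than waiting for its
 * next descriptor poll.
 */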
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!lance_tx_buffs_avail(lp)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

#ifdef DEBUG
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}

/* taken from the depca driver */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}

static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}

static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= a2065_remove_one,
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
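/* Probing: the Zorro bus code has already identified the board, so
 * a2065_init_one() only has to claim the register and RAM regions, build
 * the station address from the manufacturer prefix (Commodore or Ameristar)
 * and the board's serial number, and set up the net_device before
 * registering it.
 */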
static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + A2065_LANCE;
	unsigned long mem_start = board + A2065_RAM;
	struct resource *r1, *r2;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
	dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
	dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
	dev->base_addr = ZTWO_VADDR(base_addr);
	dev->mem_start = ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long) dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long))lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}


static void a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");