/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *			Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*#define DEBUG*/
/*#define TEST_HITS*/

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/* Transmit/Receive Ring Definitions */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE

/* Layout of the Lance's RAM Buffer */

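/* The whole init block, including the descriptor rings and the packet
 * buffers themselves, lives in the board's own RAM: 4 Tx and 16 Rx buffers
 * of 1544 bytes each come to roughly 31 KB of buffer space.
 */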
struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/* Private Device Data */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	     /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* Lance's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;		/* cable-selection is TPE */
	int auto_select;	/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
};

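/* The LANCE only drives 24 address bits, so strip the upper byte of a host
 * pointer before handing it to the chip.  The Lance-side pointers used
 * below are offsets into the board's address space (see lance_init_block
 * in a2065_init_one()).
 */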
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr = LANCE_ADDR(aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}

/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib = lp->lance_init_block;
					/* for LANCE_ADDR computations */
	int leptr;
	int i;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block.
	 * Note that the bytes are stored pairwise swapped, matching the
	 * byte-swapped 16-bit accesses the LANCE makes to this block
	 * (LE_C3_BSWP is loaded into CSR3 by load_csrs()).
	 */
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];

	/* Setup the Tx ring entries */
	netdev_dbg(dev, "TX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0 = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	netdev_dbg(dev, "RX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0 = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the initialization block */

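	/* Each descriptor ring base is handed to the chip as a 24-bit
	 * address split across two 16-bit words; bits 13-15 of the high
	 * word encode the ring length as a power of two.
	 */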
	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	netdev_dbg(dev, "RX ptr: %08x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	netdev_dbg(dev, "TX ptr: %08x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
}

static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}

static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	char buf[RX_RING_SIZE + 1];

	for (i = 0; i < RX_RING_SIZE; i++) {
		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
		if (i == lp->rx_new)
			buf[i] = r1_own ? '_' : 'X';
		else
			buf[i] = r1_own ? '.' : '1';
	}
	buf[RX_RING_SIZE] = 0;

	pr_debug("RxRing TestHits: [%s]\n", buf);
#endif

	ll->rdp = LE_C0_RINT | LE_C0_INEA;
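	/* Walk the ring from rx_new until we reach a descriptor the chip
	 * still owns.  Completed frames are copied out of the shared board
	 * RAM into freshly allocated skbs, and each descriptor is handed
	 * straight back to the chip.
	 */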
	for (rd = &ib->brx_ring[lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, len);	/* make room */
			skb_copy_to_linear_data(skb,
						(unsigned char *)&ib->rx_buf[lp->rx_new][0],
						len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}

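/* Free Tx descriptors.  One slot is always left unused, so that a full
 * ring (tx_new one behind tx_old) can be told apart from an empty one
 * (tx_new == tx_old).
 */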
static int lance_tx_buffs_avail(struct lance_private *lp)
{
	if (lp->tx_old <= lp->tx_new)
		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
	return lp->tx_old - lp->tx_new - 1;
}

static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;	/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;	/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;	/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}

static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}

static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}

static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!lance_tx_buffs_avail(lp)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

#ifdef DEBUG
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}

/* taken from the depca driver */
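/* The chip filters multicast destinations through a 64-bit hash: the top
 * six bits of the little-endian CRC-32 of each address select one bit in
 * the logical address filter of the init block.
 */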
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}

static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}

static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= a2065_remove_one,
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int a2065_init_one(struct zorro_dev *z,
			  const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + A2065_LANCE;
	unsigned long mem_start = board + A2065_RAM;
	struct resource *r1, *r2;
	u32 serial;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	serial = be32_to_cpu(z->rom.er_SerialNumber);
	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (serial >> 16) & 0xff;
	dev->dev_addr[4] = (serial >> 8) & 0xff;
	dev->dev_addr[5] = serial & 0xff;
	dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
	dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + A2065_RAM_SIZE;

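	/* init_block is the board RAM as seen by the CPU (a Zorro-II
	 * virtual address); lance_init_block is the address of the same
	 * memory from the LANCE's point of view, i.e. the RAM's offset
	 * within the board (A2065_RAM).
	 */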
	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long)dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long))lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}


static void a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");