// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
 *
 *	Copyright (c) 2018  Maciej W. Rozycki
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * References:
 *
 * Dave Sawyer & Phil Weeks & Frank Itkowsky,
 * "DEC FDDIcontroller 700 Port Specification",
 * Revision 1.1, Digital Equipment Corporation
 */

/* ------------------------------------------------------------------------- */
/* FZA configurable parameters. */

/* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024. */
#define FZA_RING_TX_MODE 0

/* The number of receive ring descriptors; from 2 up to 256. */
#define FZA_RING_RX_SIZE 256

/* End of FZA configurable parameters.  No need to change anything below. */
/* ------------------------------------------------------------------------- */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tc.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <asm/barrier.h>

#include "defza.h"

#define DRV_NAME "defza"
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"

static const char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";

MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");

static int loopback;
module_param(loopback, int, 0644);

/* Ring Purger Multicast */
static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
/* Directed Beacon Multicast */
static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };

/* Shorthands for MMIO accesses that we require to be strongly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_o readw_relaxed
#define readl_o readl_relaxed

#define writew_o writew_relaxed
#define writel_o writel_relaxed
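
/* Note that the strongly ordered flavors above also resolve to the relaxed
 * accessors: on the TURBOchannel systems this driver serves, relaxed MMIO
 * accesses to the same device are presumably already ordered WRT one
 * another, so the _o/_u split documents the ordering requirement at each
 * call site rather than selecting a different implementation.
 */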

/* Shorthands for MMIO accesses that we are happy with being weakly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed

#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed

static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
						unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
					    unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_KERNEL);
}

static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
{
	unsigned long x, y;

	x = (unsigned long)skb->data;
	y = ALIGN(x, v);

	skb_reserve(skb, y - x);
}

static inline void fza_reads(const void __iomem *from, void *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 *dst = to;
		u32 *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			*dst++ = readq_u(src++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 *)dst;
			*dst_trail = readl_u(src_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 *dst = to;

		for (size = (size + 3) / 4; size; size--)
			*dst++ = readl_u(src++);
	}
}

static inline void fza_writes(const void *from, void __iomem *to,
			      unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 *src = from;
		const u32 *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(*src++, dst++);
		if (size) {
			src_trail = (u32 *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(*src_trail, dst_trail);
		}
	} else {
		const u32 *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(*src++, dst++);
	}
}

static inline void fza_moves(const void __iomem *from, void __iomem *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(readq_u(src++), dst++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(readl_u(src_trail), dst_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(readl_u(src++), dst++);
	}
}

static inline void fza_zeros(void __iomem *to, unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(0, dst++);
		if (size) {
			dst_trail = (u32 __iomem *)dst;
			writel_u(0, dst_trail);
		}
	} else {
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(0, dst++);
	}
}

static inline void fza_regs_dump(struct fza_private *fp)
{
	pr_debug("%s: iomem registers:\n", fp->name);
	pr_debug(" reset:           0x%04x\n", readw_o(&fp->regs->reset));
	pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
	pr_debug(" status:          0x%04x\n", readw_u(&fp->regs->status));
	pr_debug(" interrupt mask:  0x%04x\n", readw_u(&fp->regs->int_mask));
	pr_debug(" control A:       0x%04x\n", readw_u(&fp->regs->control_a));
	pr_debug(" control B:       0x%04x\n", readw_u(&fp->regs->control_b));
}
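
/* In the reset helpers below, reading back the register just written (the
 * "Synchronize" idiom) forces the posted MMIO write out to the board before
 * execution continues; an extra read-back is occasionally thrown in purely
 * as a short delay.
 */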

static inline void fza_do_reset(struct fza_private *fp)
{
	/* Reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	readw_o(&fp->regs->reset);	/* Read it back for a small delay. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);

	/* Enable all interrupt events we handle. */
	writew_o(fp->int_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
}

static inline void fza_do_shutdown(struct fza_private *fp)
{
	/* Disable the driver mode. */
	writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);

	/* And reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
}

static int fza_reset(struct fza_private *fp)
{
	unsigned long flags;
	uint status, state;
	long t;

	pr_info("%s: resetting the board...\n", fp->name);

	spin_lock_irqsave(&fp->lock, flags);
	fp->state_chg_flag = 0;
	fza_do_reset(fp);
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says RESET needs up to 30 seconds to complete.  My DEFZA-AA
	 * rev. C03 happily finishes in 9.7 seconds. :-)  But we need to
	 * be on the safe side...
	 */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       45 * HZ);
	status = readw_u(&fp->regs->status);
	state = FZA_STATUS_GET_STATE(status);
	if (fp->state_chg_flag == 0) {
		pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: RESET failed!, state %x, failure ID %x\n",
		       fp->name, state, FZA_STATUS_GET_TEST(status));
		return -EIO;
	}
	pr_info("%s: OK\n", fp->name);
	pr_debug("%s: RESET: %lums elapsed\n", fp->name,
		 (45 * HZ - t) * 1000 / HZ);

	return 0;
}
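
/* Command ring protocol, as implemented below: the host fills in the next
 * descriptor's buffer while the descriptor is host-owned, hands ownership
 * to the board together with the command code, and pokes CMD_POLL in the
 * control A register; the firmware answers with a CMD_DONE interrupt and
 * leaves a completion code in the descriptor's stat field.
 */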

static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
						 int command)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
	unsigned int old_mask, new_mask;
	union fza_cmd_buf __iomem *buf;
	struct netdev_hw_addr *ha;
	int i;

	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_STATE_CHG;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
	fp->int_mask = new_mask;

	buf = fp->mmio + readl_u(&ring->buffer);

	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
	    FZA_RING_OWN_HOST) {
		pr_warn("%s: command buffer full, command: %u!\n", fp->name,
			command);
		return NULL;
	}

	switch (command) {
	case FZA_RING_CMD_INIT:
		writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
		writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
		fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
		break;

	case FZA_RING_CMD_MODCAM:
		i = 0;
		fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= FZA_CMD_CAM_SIZE)
				break;
			fza_writes(ha->addr, &buf->cam.hw_addr[i++],
				   sizeof(*buf->cam.hw_addr));
		}
		while (i < FZA_CMD_CAM_SIZE)
			fza_zeros(&buf->cam.hw_addr[i++],
				  sizeof(*buf->cam.hw_addr));
		break;

	case FZA_RING_CMD_PARAM:
		writel_u(loopback, &buf->param.loop_mode);
		writel_u(fp->t_max, &buf->param.t_max);
		writel_u(fp->t_req, &buf->param.t_req);
		writel_u(fp->tvx, &buf->param.tvx);
		writel_u(fp->lem_threshold, &buf->param.lem_threshold);
		fza_writes(&fp->station_id, &buf->param.station_id,
			   sizeof(buf->param.station_id));
		/* Convert to milliseconds due to buggy firmware. */
		writel_u(fp->rtoken_timeout / 12500,
			 &buf->param.rtoken_timeout);
		writel_u(fp->ring_purger, &buf->param.ring_purger);
		break;

	case FZA_RING_CMD_MODPROM:
		if (dev->flags & IFF_PROMISC) {
			writel_u(1, &buf->modprom.llc_prom);
			writel_u(1, &buf->modprom.smt_prom);
		} else {
			writel_u(0, &buf->modprom.llc_prom);
			writel_u(0, &buf->modprom.smt_prom);
		}
		if (dev->flags & IFF_ALLMULTI ||
		    netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
			writel_u(1, &buf->modprom.llc_multi);
		else
			writel_u(0, &buf->modprom.llc_multi);
		writel_u(1, &buf->modprom.llc_bcast);
		break;
	}

	/* Trigger the command. */
	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
	writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;

	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	return ring;
}

static int fza_init_send(struct net_device *dev,
			 struct fza_cmd_init *__iomem *init)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	unsigned long flags;
	u32 stat;
	long t;

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		/* This should never happen in the uninitialized state,
		 * so do not try to recover and just consider it fatal.
		 */
		return -ENOBUFS;

	/* INIT may take quite a long time (160ms for my C03). */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: INIT command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: INIT command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: INIT: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	if (init)
		*init = fp->mmio + readl_u(&ring->buffer);
	return 0;
}
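
/* Receive descriptors hold DMA addresses in a packed form: buffers are
 * 512-byte-aligned and their addresses are stored shifted right by 9 bits,
 * apparently so that the buffer 0 pointer and the ownership field can
 * share a word; the second pointer addresses a region 0x1000 bytes into
 * the same buffer.
 */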

static void fza_rx_init(struct fza_private *fp)
{
	int i;

	/* Fill the host receive descriptor ring. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		writel_o((fp->rx_dma[i] + 0x1000) >> 9,
			 &fp->ring_hst_rx[i].buffer1);
		writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
			 &fp->ring_hst_rx[i].buf0_own);
	}
}

static void fza_set_rx_mode(struct net_device *dev)
{
	fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
	fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}

union fza_buffer_txp {
	struct fza_buffer_tx *data_ptr;
	struct fza_buffer_tx __iomem *mmio_ptr;
};

static int fza_do_xmit(union fza_buffer_txp ub, int len,
		       struct net_device *dev, int smt)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *rmc_tx_ptr;
	int i, first, frag_len, left_len;
	u32 own, rmc;

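	/* Free TX ring space, in bytes: the number of descriptors between
	 * the producer index and one slot short of the reclaim index
	 * (modulo the ring size), each backed by one FZA_TX_BUFFER_SIZE
	 * buffer.  Keeping one slot unused distinguishes a full ring
	 * from an empty one.
	 */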
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < len)
		return 1;

	first = fp->ring_rmc_tx_index;

	left_len = len;
	frag_len = FZA_TX_BUFFER_SIZE;
	/* First descriptor is relinquished last. */
	own = FZA_RING_TX_OWN_HOST;
	/* First descriptor carries frame length; we don't use cut-through. */
	rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
	do {
		i = fp->ring_rmc_tx_index;
		rmc_tx_ptr = &fp->buffer_tx[i];

		if (left_len < FZA_TX_BUFFER_SIZE)
			frag_len = left_len;
		left_len -= frag_len;

		/* Length must be a multiple of 4 as only word writes are
		 * permitted!
		 */
		frag_len = (frag_len + 3) & ~3;
		if (smt)
			fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
		else
			fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);

		if (left_len == 0)
			rmc |= FZA_RING_TX_EOP;	/* Mark last frag. */

		writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
		writel_o(own, &fp->ring_rmc_tx[i].own);

		ub.data_ptr++;
		fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
					fp->ring_rmc_tx_size;

		/* Settings for intermediate frags. */
		own = FZA_RING_TX_OWN_RMC;
		rmc = 0;
	} while (left_len > 0);

	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
	}

	writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);

	/* Go, go, go! */
	writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);

	return 0;
}

static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
			   u32 rmc, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_rx_ptr;
	u32 own;
	int i;

	i = fp->ring_smt_rx_index;
	own = readl_o(&fp->ring_smt_rx[i].own);
	if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
		return 1;

	smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);

	/* Length must be a multiple of 4 as only word writes are permitted! */
	fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);

	writel_o(rmc, &fp->ring_smt_rx[i].rmc);
	writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);

	fp->ring_smt_rx_index =
		(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;

	/* Grab it! */
	writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);

	return 0;
}

static void fza_tx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own, rmc;
	int i;

	while (1) {
		i = fp->ring_rmc_txd_index;
		if (i == fp->ring_rmc_tx_index)
			break;
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
			break;

		rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
		/* Only process the first descriptor. */
		if ((rmc & FZA_RING_TX_SOP) != 0) {
			if ((rmc & FZA_RING_TX_DCC_MASK) ==
			    FZA_RING_TX_DCC_SUCCESS) {
				int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
							/* Omit PRH. */

				fp->stats.tx_packets++;
				fp->stats.tx_bytes += pkt_len;
			} else {
				fp->stats.tx_errors++;
				switch (rmc & FZA_RING_TX_DCC_MASK) {
				case FZA_RING_TX_DCC_DTP_SOP:
				case FZA_RING_TX_DCC_DTP:
				case FZA_RING_TX_DCC_ABORT:
					fp->stats.tx_aborted_errors++;
					break;
				case FZA_RING_TX_DCC_UNDRRUN:
					fp->stats.tx_fifo_errors++;
					break;
				case FZA_RING_TX_DCC_PARITY:
				default:
					break;
				}
			}
		}

		fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
					 fp->ring_rmc_tx_size;
	}

	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
		if (fp->queue_active) {
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
		}
	}
}
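
/* Receive error handling: MAC conditions the driver considers fatal make
 * it halt the interface on purpose, which in turn causes the state-change
 * handler to put the board through a full reset; everything else is only
 * counted and the frame dropped.
 */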

static inline int fza_rx_err(struct fza_private *fp,
			     const u32 rmc, const u8 fc)
{
	int len, min_len, max_len;

	len = rmc & FZA_RING_PBC_MASK;

	if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
		fp->stats.rx_errors++;

		/* Check special status codes. */
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
			if (len >= 8190)
				fp->stats.rx_length_errors++;
			return 1;
		}
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		}

		/* Check the MAC status. */
		switch (rmc & FZA_RING_RX_RRR_MASK) {
		case FZA_RING_RX_RRR_OK:
			if ((rmc & FZA_RING_RX_CRC) != 0)
				fp->stats.rx_crc_errors++;
			else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
				 (rmc & FZA_RING_RX_FSB_ERR) != 0)
				fp->stats.rx_frame_errors++;
			return 1;
		case FZA_RING_RX_RRR_SADDR:
		case FZA_RING_RX_RRR_DADDR:
		case FZA_RING_RX_RRR_ABORT:
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		case FZA_RING_RX_RRR_LENGTH:
			fp->stats.rx_frame_errors++;
			return 1;
		default:
			return 1;
		}
	}

	/* Packet received successfully; validate the length. */
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
			min_len = 37;
		else
			min_len = 17;
		break;
	case FDDI_FC_K_FORMAT_LLC:
		min_len = 20;
		break;
	default:
		min_len = 17;
		break;
	}
	max_len = 4495;
	if (len < min_len || len > max_len) {
		fp->stats.rx_errors++;
		fp->stats.rx_length_errors++;
		return 1;
	}

	return 0;
}
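
/* The receive path avoids copying frame data: a freshly allocated,
 * 512-byte-aligned skb is swapped into the ring slot and the filled one
 * is passed up the stack.  If the allocation or the DMA mapping fails,
 * the frame is dropped and the old buffer is recycled instead.
 */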

static void fza_rx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct sk_buff *skb, *newskb;
	struct fza_fddihdr *frame;
	dma_addr_t dma, newdma;
	u32 own, rmc, buf;
	int i, len;
	u8 fc;

	while (1) {
		i = fp->ring_hst_rx_index;
		own = readl_o(&fp->ring_hst_rx[i].buf0_own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		rmc = readl_u(&fp->ring_hst_rx[i].rmc);
		skb = fp->rx_skbuff[i];
		dma = fp->rx_dma[i];

		/* The RMC doesn't count the preamble and the starting
		 * delimiter.  We fix it up here for a total of 3 octets.
		 */
		dma_rmb();
		len = (rmc & FZA_RING_PBC_MASK) + 3;
		frame = (struct fza_fddihdr *)skb->data;

		/* We need to get at real FC. */
		dma_sync_single_for_cpu(fp->bdev,
					dma +
					((u8 *)&frame->hdr.fc - (u8 *)frame),
					sizeof(frame->hdr.fc),
					DMA_FROM_DEVICE);
		fc = frame->hdr.fc;

		if (fza_rx_err(fp, rmc, fc))
			goto err_rx;

		/* We have to 512-byte-align RX buffers... */
		newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
		if (newskb) {
			fza_skb_align(newskb, 512);
			newdma = dma_map_single(fp->bdev, newskb->data,
						FZA_RX_BUFFER_SIZE,
						DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, newdma)) {
				dev_kfree_skb_irq(newskb);
				newskb = NULL;
			}
		}
		if (newskb) {
			int pkt_len = len - 7;	/* Omit P, SD and FCS. */
			int is_multi;
			int rx_stat;

			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
					 DMA_FROM_DEVICE);

			/* Queue SMT frames to the SMT receive ring. */
			if ((fc & (FDDI_FC_K_CLASS_MASK |
				   FDDI_FC_K_FORMAT_MASK)) ==
			    (FDDI_FC_K_CLASS_ASYNC |
			     FDDI_FC_K_FORMAT_MANAGEMENT) &&
			    (rmc & FZA_RING_RX_DA_MASK) !=
			    FZA_RING_RX_DA_PROM) {
				if (fza_do_recv_smt((struct fza_buffer_tx *)
						    skb->data, len, rmc,
						    dev)) {
					writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
						 &fp->regs->control_a);
				}
			}

			is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);

			skb_reserve(skb, 3);	/* Skip over P and SD. */
			skb_put(skb, pkt_len);	/* And cut off FCS. */
			skb->protocol = fddi_type_trans(skb, dev);

			rx_stat = netif_rx(skb);
			if (rx_stat != NET_RX_DROP) {
				fp->stats.rx_packets++;
				fp->stats.rx_bytes += pkt_len;
				if (is_multi)
					fp->stats.multicast++;
			} else {
				fp->stats.rx_dropped++;
			}

			skb = newskb;
			dma = newdma;
			fp->rx_skbuff[i] = skb;
			fp->rx_dma[i] = dma;
		} else {
			fp->stats.rx_dropped++;
			pr_notice("%s: memory squeeze, dropping packet\n",
				  fp->name);
		}

err_rx:
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		buf = (dma + 0x1000) >> 9;
		writel_o(buf, &fp->ring_hst_rx[i].buffer1);
		buf = dma >> 9 | FZA_RING_OWN_FZA;
		writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
		fp->ring_hst_rx_index =
			(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
	}
}
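
/* SMT frames originated by the firmware appear on the SMT transmit ring;
 * the handler below forwards each one to the RMC transmit ring for actual
 * transmission and, if packet taps are active, also clones it into an skb
 * so that sniffers get to see it.
 */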

static void fza_tx_smt(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_tx_ptr;
	int i, len;
	u32 own;

	while (1) {
		i = fp->ring_smt_tx_index;
		own = readl_o(&fp->ring_smt_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
		len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;

		if (!netif_queue_stopped(dev)) {
			if (dev_nit_active(dev)) {
				struct fza_buffer_tx *skb_data_ptr;
				struct sk_buff *skb;

				/* Length must be a multiple of 4 as only word
				 * reads are permitted!
				 */
				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
				if (!skb)
					goto err_no_skb;	/* Drop. */

				skb_data_ptr = (struct fza_buffer_tx *)
					       skb->data;

				fza_reads(smt_tx_ptr, skb_data_ptr,
					  (len + 3) & ~3);
				skb->dev = dev;
				skb_reserve(skb, 3);	/* Skip over PRH. */
				skb_put(skb, len - 3);
				skb_reset_network_header(skb);

				dev_queue_xmit_nit(skb, dev);

				dev_kfree_skb_irq(skb);

err_no_skb:
				;
			}

			/* Queue the frame to the RMC transmit ring. */
			fza_do_xmit((union fza_buffer_txp)
				    { .mmio_ptr = smt_tx_ptr },
				    len, dev, 1);
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
	}
}

static void fza_uns(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	while (1) {
		i = fp->ring_uns_index;
		own = readl_o(&fp->ring_uns[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
			fp->stats.rx_errors++;
			fp->stats.rx_over_errors++;
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
		fp->ring_uns_index =
			(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
	}
}

static void fza_tx_flush(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	/* Clean up the SMT TX ring. */
	i = fp->ring_smt_tx_index;
	do {
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;

	} while (i != fp->ring_smt_tx_index);

	/* Clean up the RMC TX ring. */
	i = fp->ring_rmc_tx_index;
	do {
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
			u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);

			writel_u(rmc | FZA_RING_TX_DTP,
				 &fp->ring_rmc_tx[i].rmc);
		}
		fp->ring_rmc_tx_index =
			(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;

	} while (i != fp->ring_rmc_tx_index);

	/* Done. */
	writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}
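
/* Interrupt events are latched in the event register and cleared by
 * writing the set bits back; they are then handled one by one, in a
 * fixed order that, as noted below, matters (the state change is
 * deliberately left until last).
 */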

static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fza_private *fp = netdev_priv(dev);
	uint int_event;

	/* Get interrupt events. */
	int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
	if (int_event == 0)
		return IRQ_NONE;

	/* Clear the events. */
	writew_u(int_event, &fp->regs->int_event);

	/* Now handle the events.  The order matters. */

	/* Command finished interrupt. */
	if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
		fp->irq_count_cmd_done++;

		spin_lock(&fp->lock);
		fp->cmd_done_flag = 1;
		wake_up(&fp->cmd_done_wait);
		spin_unlock(&fp->lock);
	}

	/* Transmit finished interrupt. */
	if ((int_event & FZA_EVENT_TX_DONE) != 0) {
		fp->irq_count_tx_done++;
		fza_tx(dev);
	}

	/* Host receive interrupt. */
	if ((int_event & FZA_EVENT_RX_POLL) != 0) {
		fp->irq_count_rx_poll++;
		fza_rx(dev);
	}

	/* SMT transmit interrupt. */
	if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
		fp->irq_count_smt_tx_poll++;
		fza_tx_smt(dev);
	}

	/* Transmit ring flush request. */
	if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
		fp->irq_count_flush_tx++;
		fza_tx_flush(dev);
	}

	/* Link status change interrupt. */
	if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
		uint status;

		fp->irq_count_link_st_chg++;
		status = readw_u(&fp->regs->status);
		if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
			netif_carrier_on(dev);
			pr_info("%s: link available\n", fp->name);
		} else {
			netif_carrier_off(dev);
			pr_info("%s: link unavailable\n", fp->name);
		}
	}

	/* Unsolicited event interrupt. */
	if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
		fp->irq_count_uns_poll++;
		fza_uns(dev);
	}

	/* State change interrupt. */
	if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
		uint status, state;

		fp->irq_count_state_chg++;

		status = readw_u(&fp->regs->status);
		state = FZA_STATUS_GET_STATE(status);
		pr_debug("%s: state change: %x\n", fp->name, state);
		switch (state) {
		case FZA_STATE_RESET:
			break;

		case FZA_STATE_UNINITIALIZED:
			netif_carrier_off(dev);
			del_timer_sync(&fp->reset_timer);
			fp->ring_cmd_index = 0;
			fp->ring_uns_index = 0;
			fp->ring_rmc_tx_index = 0;
			fp->ring_rmc_txd_index = 0;
			fp->ring_hst_rx_index = 0;
			fp->ring_smt_tx_index = 0;
			fp->ring_smt_rx_index = 0;
			if (fp->state > state) {
				pr_info("%s: OK\n", fp->name);
				fza_cmd_send(dev, FZA_RING_CMD_INIT);
			}
			break;

		case FZA_STATE_INITIALIZED:
			if (fp->state > state) {
				fza_set_rx_mode(dev);
				fza_cmd_send(dev, FZA_RING_CMD_PARAM);
			}
			break;

		case FZA_STATE_RUNNING:
		case FZA_STATE_MAINTENANCE:
			fp->state = state;
			fza_rx_init(fp);
			fp->queue_active = 1;
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
			break;

		case FZA_STATE_HALTED:
			fp->queue_active = 0;
			netif_stop_queue(dev);
			pr_debug("%s: queue stopped\n", fp->name);
			del_timer_sync(&fp->reset_timer);
			pr_warn("%s: halted, reason: %x\n", fp->name,
				FZA_STATUS_GET_HALT(status));
			fza_regs_dump(fp);
			pr_info("%s: resetting the board...\n", fp->name);
			fza_do_reset(fp);
			fp->timer_state = 0;
			fp->reset_timer.expires = jiffies + 45 * HZ;
			add_timer(&fp->reset_timer);
			break;

		default:
			pr_warn("%s: undefined state: %x\n", fp->name, state);
			break;
		}

		spin_lock(&fp->lock);
		fp->state_chg_flag = 1;
		wake_up(&fp->state_chg_wait);
		spin_unlock(&fp->lock);
	}

	return IRQ_HANDLED;
}
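
/* Two-phase watchdog for a reset issued from the HALTED state that got
 * no response within 45s: the first expiry asserts the board reset and
 * rearms for one second, the second releases it, re-enables interrupts
 * and goes back to waiting another 45s for the state change.
 */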

static void fza_reset_timer(struct timer_list *t)
{
	struct fza_private *fp = from_timer(fp, t, reset_timer);

	if (!fp->timer_state) {
		pr_err("%s: RESET timed out!\n", fp->name);
		pr_info("%s: trying harder...\n", fp->name);

		/* Assert the board reset. */
		writew_o(FZA_RESET_INIT, &fp->regs->reset);
		readw_o(&fp->regs->reset);	/* Synchronize. */

		fp->timer_state = 1;
		fp->reset_timer.expires = jiffies + HZ;
	} else {
		/* Clear the board reset. */
		writew_u(FZA_RESET_CLR, &fp->regs->reset);

		/* Enable all interrupt events we handle. */
		writew_o(fp->int_mask, &fp->regs->int_mask);
		readw_o(&fp->regs->int_mask);	/* Synchronize. */

		fp->timer_state = 0;
		fp->reset_timer.expires = jiffies + 45 * HZ;
	}
	add_timer(&fp->reset_timer);
}

static int fza_set_mac_address(struct net_device *dev, void *addr)
{
	return -EOPNOTSUPP;
}

static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned int old_mask, new_mask;
	int ret;
	u8 fc;

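	/* The 3-byte Packet Request Header prepended below is consumed by
	 * the RMC rather than sent on the wire: judging by the macro names,
	 * byte 0 carries frame and token-capture flags (FZA_PRH0_*), byte 1
	 * token-release and CRC flags (FZA_PRH1_*) and byte 2 a request
	 * class (FZA_PRH2_NORMAL), all derived from the FC octet.
	 */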
	skb_push(skb, 3);	/* Make room for PRH. */

	/* Decode FC to set PRH. */
	fc = skb->data[3];
	skb->data[0] = 0;
	skb->data[1] = 0;
	skb->data[2] = FZA_PRH2_NORMAL;
	if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
			/* Token. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
		} else {
			/* SMT or MAC. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
		}
		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
		break;
	case FDDI_FC_K_FORMAT_LLC:
	case FDDI_FC_K_FORMAT_FUTURE:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
		break;
	case FDDI_FC_K_FORMAT_IMPLEMENTOR:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
		break;
	}

	/* SMT transmit interrupts may sneak frames into the RMC
	 * transmit ring.  We disable them while queueing a frame
	 * to maintain consistency.
	 */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
	fp->int_mask = new_mask;
	ret = fza_do_xmit((union fza_buffer_txp)
			  { .data_ptr = (struct fza_buffer_tx *)skb->data },
			  skb->len, dev, 0);
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	if (ret) {
		/* Probably an SMT packet filled the remaining space,
		 * so just stop the queue, but don't report it as an error.
		 */
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
		fp->stats.tx_dropped++;
	}

	/* The skb has been consumed (and freed) either way, so this must
	 * not report NETDEV_TX_BUSY or any other non-zero value.
	 */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int fza_open(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma;
	int ret, i;
	u32 stat;
	long t;

	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		/* We have to 512-byte-align RX buffers... */
		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
		if (skb) {
			fza_skb_align(skb, 512);
			dma = dma_map_single(fp->bdev, skb->data,
					     FZA_RX_BUFFER_SIZE,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, dma)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (!skb) {
			for (--i; i >= 0; i--) {
				dma_unmap_single(fp->bdev, fp->rx_dma[i],
						 FZA_RX_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(fp->rx_skbuff[i]);
				fp->rx_dma[i] = 0;
				fp->rx_skbuff[i] = NULL;
			}
			return -ENOMEM;
		}
		fp->rx_skbuff[i] = skb;
		fp->rx_dma[i] = dma;
	}

	ret = fza_init_send(dev, NULL);
	if (ret != 0)
		return ret;

	/* Purger and Beacon multicasts need to be supplied before PARAM. */
	fza_set_rx_mode(dev);

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		return -ENOBUFS;

	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: PARAM command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	return 0;
}
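
/* Closing is the reverse of opening: the interface is told to SHUT and,
 * once the state machine has dropped back to UNINITIALIZED, the receive
 * buffers are unmapped and freed.
 */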

static int fza_close(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned long flags;
	uint state;
	long t;
	int i;

	netif_stop_queue(dev);
	pr_debug("%s: queue stopped\n", fp->name);

	del_timer_sync(&fp->reset_timer);
	spin_lock_irqsave(&fp->lock, flags);
	fp->state = FZA_STATE_UNINITIALIZED;
	fp->state_chg_flag = 0;
	/* Shut the interface down. */
	writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
	readw_o(&fp->regs->control_a);	/* Synchronize. */
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says SHUT needs up to 10 seconds to complete. */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       15 * HZ);
	state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
	if (fp->state_chg_flag == 0) {
		pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
		return -EIO;
	}
	pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
		 (15 * HZ - t) * 1000 / HZ);

	for (i = 0; i < FZA_RING_RX_SIZE; i++)
		if (fp->rx_skbuff[i]) {
			dma_unmap_single(fp->bdev, fp->rx_dma[i],
					 FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(fp->rx_skbuff[i]);
			fp->rx_dma[i] = 0;
			fp->rx_skbuff[i] = NULL;
		}

	return 0;
}

static struct net_device_stats *fza_get_stats(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);

	return &fp->stats;
}
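
/* One-time TURBOchannel probe: map the board, take it through a reset
 * and an INIT command to learn the ring layout, the MAC address and the
 * default station parameters, then shut it down again until the
 * interface is opened.
 */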

static int fza_probe(struct device *bdev)
{
	static const struct net_device_ops netdev_ops = {
		.ndo_open = fza_open,
		.ndo_stop = fza_close,
		.ndo_start_xmit = fza_start_xmit,
		.ndo_set_rx_mode = fza_set_rx_mode,
		.ndo_set_mac_address = fza_set_mac_address,
		.ndo_get_stats = fza_get_stats,
	};
	static int version_printed;
	char rom_rev[4], fw_rev[4], rmc_rev[4];
	struct tc_dev *tdev = to_tc_dev(bdev);
	struct fza_cmd_init __iomem *init;
	resource_size_t start, len;
	struct net_device *dev;
	struct fza_private *fp;
	uint smt_ver, pmd_type;
	void __iomem *mmio;
	uint hw_addr[2];
	int ret, i;

	if (!version_printed) {
		pr_info("%s", version);
		version_printed = 1;
	}

	dev = alloc_fddidev(sizeof(*fp));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, bdev);

	fp = netdev_priv(dev);
	dev_set_drvdata(bdev, dev);

	fp->bdev = bdev;
	fp->name = dev_name(bdev);

	/* Request the I/O MEM resource. */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(bdev))) {
		pr_err("%s: cannot reserve MMIO region\n", fp->name);
		ret = -EBUSY;
		goto err_out_kfree;
	}

	/* MMIO mapping setup. */
	mmio = ioremap(start, len);
	if (!mmio) {
		pr_err("%s: cannot map MMIO\n", fp->name);
		ret = -ENOMEM;
		goto err_out_resource;
	}

	/* Initialize the new device structure. */
	switch (loopback) {
	case FZA_LOOP_NORMAL:
	case FZA_LOOP_INTERN:
	case FZA_LOOP_EXTERN:
		break;
	default:
		loopback = FZA_LOOP_NORMAL;
	}

	fp->mmio = mmio;
	dev->irq = tdev->interrupt;

	pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
		fp->name, (long long)tdev->resource.start, dev->irq);
	pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);

	fp->regs = mmio + FZA_REG_BASE;
	fp->ring_cmd = mmio + FZA_RING_CMD;
	fp->ring_uns = mmio + FZA_RING_UNS;

	init_waitqueue_head(&fp->state_chg_wait);
	init_waitqueue_head(&fp->cmd_done_wait);
	spin_lock_init(&fp->lock);
	fp->int_mask = FZA_MASK_NORMAL;

	timer_setup(&fp->reset_timer, fza_reset_timer, 0);

	/* Sanitize the board. */
	fza_regs_dump(fp);
	fza_do_shutdown(fp);

	ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
	if (ret != 0) {
		pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
		goto err_out_map;
	}

	/* Enable the driver mode. */
	writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);

	/* For some reason transmit done interrupts can trigger during
	 * reset.  This avoids a division error in the handler.
	 */
	fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;

	ret = fza_reset(fp);
	if (ret != 0)
		goto err_out_irq;

	ret = fza_init_send(dev, &init);
	if (ret != 0)
		goto err_out_irq;

	fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
	memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);

	fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
	fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
	fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
	for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
		rom_rev[i] = 0;
	for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
		fw_rev[i] = 0;
	for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
		rmc_rev[i] = 0;

	fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
	fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
	fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
	fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
	fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
	fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
	fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
	fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);

	fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));

	fp->t_max = readl_u(&init->def_t_max);
	fp->t_req = readl_u(&init->def_t_req);
	fp->tvx = readl_u(&init->def_tvx);
	fp->lem_threshold = readl_u(&init->lem_threshold);
	fza_reads(&init->def_station_id, &fp->station_id,
		  sizeof(fp->station_id));
	fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
	fp->ring_purger = readl_u(&init->ring_purger);

	smt_ver = readl_u(&init->smt_ver);
	pmd_type = readl_u(&init->pmd_type);

	pr_debug("%s: INIT parameters:\n", fp->name);
	pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
	pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
	pr_debug(" rmc_rev: %.4s\n", rmc_rev);
	pr_debug(" rom_rev: %.4s\n", rom_rev);
	pr_debug(" fw_rev: %.4s\n", fw_rev);
	pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
	pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
	pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
	pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
	pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
	pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
	pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
	pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
	/* TC systems are always LE, so don't bother swapping. */
	pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
	pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
	pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
	pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
	pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
	/* Don't bother swapping, see above. */
	pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
	pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
	pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
	pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
	pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
	pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
	pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
	pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));

	pr_info("%s: model %s, address %pMF\n",
		fp->name,
		pmd_type == FZA_PMD_TYPE_TW ?
			"700-C (DEFZA-CA), ThinWire PMD selected" :
		pmd_type == FZA_PMD_TYPE_STP ?
			"700-C (DEFZA-CA), STP PMD selected" :
			"700 (DEFZA-AA), MMF PMD",
		dev->dev_addr);
	pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
		"SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);

	/* Now that we fetched initial parameters just shut the interface
	 * until opened.
	 */
	ret = fza_close(dev);
	if (ret != 0)
		goto err_out_irq;

	/* The FZA-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;

	ret = register_netdev(dev);
	if (ret != 0)
		goto err_out_irq;

	pr_info("%s: registered as %s\n", fp->name, dev->name);
	fp->name = (const char *)dev->name;

	get_device(bdev);
	return 0;

err_out_irq:
	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

err_out_map:
	iounmap(mmio);

err_out_resource:
	release_mem_region(start, len);

err_out_kfree:
	/* Print before freeing: fp lives inside the netdev allocation. */
	pr_err("%s: initialization failure, aborting!\n", fp->name);
	free_netdev(dev);

	return ret;
}
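
/* Device removal undoes fza_probe() step by step: drop the reference
 * taken on success, unregister the interface, quiesce the board, then
 * release the IRQ, the MMIO mapping and the memory region.
 */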

static int fza_remove(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	struct fza_private *fp = netdev_priv(dev);
	struct tc_dev *tdev = to_tc_dev(bdev);
	resource_size_t start, len;

	put_device(bdev);

	unregister_netdev(dev);

	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

	iounmap(fp->mmio);

	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);

	free_netdev(dev);

	return 0;
}

static struct tc_device_id const fza_tc_table[] = {
	{ "DEC     ", "PMAF-AA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);

static struct tc_driver fza_driver = {
	.id_table = fza_tc_table,
	.driver = {
		.name = "defza",
		.bus = &tc_bus_type,
		.probe = fza_probe,
		.remove = fza_remove,
	},
};

static int fza_init(void)
{
	return tc_register_driver(&fza_driver);
}

static void fza_exit(void)
{
	tc_unregister_driver(&fza_driver);
}

module_init(fza_init);
module_exit(fza_exit);