// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
 *
 * Copyright (c) 2018 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * References:
 *
 * Dave Sawyer & Phil Weeks & Frank Itkowsky,
 * "DEC FDDIcontroller 700 Port Specification",
 * Revision 1.1, Digital Equipment Corporation
 */

/* ------------------------------------------------------------------------- */
/* FZA configurable parameters. */

/* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024. */
#define FZA_RING_TX_MODE 0

/* The number of receive ring descriptors; from 2 up to 256. */
#define FZA_RING_RX_SIZE 256

/* End of FZA configurable parameters. No need to change anything below. */
/* ------------------------------------------------------------------------- */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tc.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <asm/barrier.h>

#include "defza.h"

#define DRV_NAME "defza"
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"

static char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";

MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");

/* Loopback mode to request from the firmware with the PARAM command;
 * writable at runtime via sysfs (mode 0644).
 */
static int loopback;
module_param(loopback, int, 0644);

/* Ring Purger Multicast */
static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
/* Directed Beacon Multicast */
static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };

/* Shorthands for MMIO accesses that we require to be strongly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_o readw_relaxed
#define readl_o readl_relaxed

#define writew_o writew_relaxed
#define writel_o writel_relaxed

/* Shorthands for MMIO accesses that we are happy with being weakly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed

#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed

/* Allocate an skb for "dev" in atomic (e.g. interrupt) context. */
static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
						unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* Allocate an skb for "dev" in process context (may sleep). */
static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
					    unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_KERNEL);
}

/* Advance skb->data to the next multiple of "v" bytes by reserving
 * the difference; "v" is assumed to be a power of 2 (ALIGN requirement).
 */
static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
{
	unsigned long x, y;

	x = (unsigned long)skb->data;
	y = ALIGN(x, v);

	skb_reserve(skb, y - x);
}

/* Copy "size" bytes (rounded up to a whole number of 32-bit words) from
 * MMIO to host memory, using 64-bit accesses where the architecture
 * permits (with a trailing 32-bit access for an odd word count).
 */
static inline void fza_reads(const void __iomem *from, void *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 *dst = to;
		u32 *dst_trail;

		/* "size" becomes a count of 32-bit words; move them in pairs. */
		for (size = (size + 3) / 4; size > 1; size -= 2)
			*dst++ = readq_u(src++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 *)dst;
			*dst_trail = readl_u(src_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 *dst = to;

		for (size = (size + 3) / 4; size; size--)
			*dst++ = readl_u(src++);
	}
}

/* Copy "size" bytes (rounded up to whole 32-bit words) from host memory
 * to MMIO; the mirror image of fza_reads().
 */
static inline void fza_writes(const void *from, void __iomem *to,
			      unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 *src = from;
		const u32 *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(*src++, dst++);
		if (size) {
			src_trail = (u32 *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(*src_trail, dst_trail);
		}
	} else {
		const u32 *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(*src++, dst++);
	}
}

/* Copy "size" bytes (rounded up to whole 32-bit words) between two MMIO
 * regions, never touching host memory.
 */
static inline void fza_moves(const void __iomem *from, void __iomem *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(readq_u(src++), dst++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(readl_u(src_trail), dst_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(readl_u(src++), dst++);
	}
}

/* Clear "size" bytes (rounded up to whole 32-bit words) of MMIO. */
static inline void fza_zeros(void __iomem *to, unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(0, dst++);
		if (size) {
			dst_trail = (u32 __iomem *)dst;
			writel_u(0, dst_trail);
		}
	} else {
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(0, dst++);
	}
}

/* Dump the main adapter CSRs for diagnostics (pr_debug only). */
static inline void fza_regs_dump(struct fza_private *fp)
{
	pr_debug("%s: iomem registers:\n", fp->name);
	pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
	pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
	pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
	pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
	pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
	pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
}

/* Pulse the board reset and re-enable our interrupt events; does not
 * wait for the reset to complete (see fza_reset() for that).
 */
static inline void fza_do_reset(struct fza_private *fp)
{
	/* Reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	readw_o(&fp->regs->reset);	/* Read it back for a small delay. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);

	/* Enable all interrupt events we handle. */
	writew_o(fp->int_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
}

/* Take the adapter out of the driver mode and hold it in reset;
 * interrupts are left disabled (mask is not rewritten here).
 */
static inline void fza_do_shutdown(struct fza_private *fp)
{
	/* Disable the driver mode. */
	writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);

	/* And reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
}

/* Reset the board and wait (sleeping) for it to reach the UNINITIALIZED
 * state, as signalled by the state-change interrupt setting
 * fp->state_chg_flag.  Returns 0 on success, -EIO on timeout or if the
 * board lands in an unexpected state (self-test failure).
 */
static int fza_reset(struct fza_private *fp)
{
	unsigned long flags;
	uint status, state;
	long t;

	pr_info("%s: resetting the board...\n", fp->name);

	spin_lock_irqsave(&fp->lock, flags);
	fp->state_chg_flag = 0;
	fza_do_reset(fp);
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
	 * rev. C03 happily finishes in 9.7 seconds. :-) But we need to
	 * be on the safe side...
	 */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       45 * HZ);
	status = readw_u(&fp->regs->status);
	state = FZA_STATUS_GET_STATE(status);
	if (fp->state_chg_flag == 0) {
		pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: RESET failed!, state %x, failure ID %x\n",
		       fp->name, state, FZA_STATUS_GET_TEST(status));
		return -EIO;
	}
	pr_info("%s: OK\n", fp->name);
	pr_debug("%s: RESET: %lums elapsed\n", fp->name,
		 (45 * HZ - t) * 1000 / HZ);

	return 0;
}

/* Fill in and post a command to the adapter's command ring and poke the
 * firmware to process it.  State-change interrupts are masked for the
 * duration so that command submission is not interleaved with a state
 * transition.  Returns a pointer to the ring slot used (so the caller
 * can later read its completion status), or NULL if the ring slot is
 * still owned by the firmware (ring full).
 */
static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
						 int command)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
	unsigned int old_mask, new_mask;
	union fza_cmd_buf __iomem *buf;
	struct netdev_hw_addr *ha;
	int i;

	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_STATE_CHG;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);			/* Synchronize. */
	fp->int_mask = new_mask;

	buf = fp->mmio + readl_u(&ring->buffer);

	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
	    FZA_RING_OWN_HOST) {
		/* NOTE(review): this early return leaves FZA_MASK_STATE_CHG
		 * masked (old_mask is not restored as it is on the normal
		 * path below) -- verify this is intended.
		 */
		pr_warn("%s: command buffer full, command: %u!\n", fp->name,
			command);
		return NULL;
	}

	/* Fill in the command-specific argument buffer. */
	switch (command) {
	case FZA_RING_CMD_INIT:
		writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
		writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
		fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
		break;

	case FZA_RING_CMD_MODCAM:
		/* The first two CAM slots always carry the Ring Purger and
		 * Directed Beacon multicast addresses; the remaining slots
		 * are filled from the device's multicast list and any
		 * leftover slots are cleared.
		 */
		i = 0;
		fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= FZA_CMD_CAM_SIZE)
				break;
			fza_writes(ha->addr, &buf->cam.hw_addr[i++],
				   sizeof(*buf->cam.hw_addr));
		}
		while (i < FZA_CMD_CAM_SIZE)
			fza_zeros(&buf->cam.hw_addr[i++],
				  sizeof(*buf->cam.hw_addr));
		break;

	case FZA_RING_CMD_PARAM:
		writel_u(loopback, &buf->param.loop_mode);
		writel_u(fp->t_max, &buf->param.t_max);
		writel_u(fp->t_req, &buf->param.t_req);
		writel_u(fp->tvx, &buf->param.tvx);
		writel_u(fp->lem_threshold, &buf->param.lem_threshold);
		fza_writes(&fp->station_id, &buf->param.station_id,
			   sizeof(buf->param.station_id));
		/* Convert to milliseconds due to buggy firmware. */
		writel_u(fp->rtoken_timeout / 12500,
			 &buf->param.rtoken_timeout);
		writel_u(fp->ring_purger, &buf->param.ring_purger);
		break;

	case FZA_RING_CMD_MODPROM:
		if (dev->flags & IFF_PROMISC) {
			writel_u(1, &buf->modprom.llc_prom);
			writel_u(1, &buf->modprom.smt_prom);
		} else {
			writel_u(0, &buf->modprom.llc_prom);
			writel_u(0, &buf->modprom.smt_prom);
		}
		/* Fall back to multicast-promiscuous mode when the CAM
		 * cannot hold the whole multicast list (two slots are
		 * reserved for the Purger/Beacon addresses above).
		 */
		if (dev->flags & IFF_ALLMULTI ||
		    netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
			writel_u(1, &buf->modprom.llc_multi);
		else
			writel_u(0, &buf->modprom.llc_multi);
		writel_u(1, &buf->modprom.llc_bcast);
		break;
	}

	/* Trigger the command. */
	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
	writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;

	/* Restore the original interrupt mask. */
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	return ring;
}

/* Send the INIT command and wait (sleeping) for its completion; on
 * success optionally return, via "init", the MMIO pointer to the
 * firmware's INIT response buffer.  Returns 0 on success, -ENOBUFS if
 * the command ring is full (treated as fatal here), -EIO on timeout or
 * command failure.
 */
static int fza_init_send(struct net_device *dev,
			 struct fza_cmd_init *__iomem *init)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	unsigned long flags;
	u32 stat;
	long t;

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		/* This should never happen in the uninitialized state,
		 * so do not try to recover and just consider it fatal.
		 */
		return -ENOBUFS;

	/* INIT may take quite a long time (160ms for my C03).
	 */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: INIT command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: INIT command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: INIT: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	if (init)
		*init = fp->mmio + readl_u(&ring->buffer);
	return 0;
}

/* Hand all host receive descriptors (with their preallocated DMA
 * buffers from fp->rx_dma[]) over to the firmware.  Buffer addresses
 * are programmed in 512-byte units (hence the ">> 9"); buffer1 points
 * 0x1000 bytes into the same buffer -- presumably the second page of a
 * split buffer, per the port specification (TODO confirm).
 */
static void fza_rx_init(struct fza_private *fp)
{
	int i;

	/* Fill the host receive descriptor ring. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		writel_o((fp->rx_dma[i] + 0x1000) >> 9,
			 &fp->ring_hst_rx[i].buffer1);
		writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
			 &fp->ring_hst_rx[i].buf0_own);
	}
}

/* Push the current CAM contents and promiscuity settings to the
 * firmware (ndo_set_rx_mode callback).
 */
static void fza_set_rx_mode(struct net_device *dev)
{
	fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
	fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}

/* A transmit source is either a host-memory buffer (LLC frames from the
 * stack) or an MMIO buffer (SMT frames forwarded from the SMT TX ring);
 * this union lets fza_do_xmit() take either.
 */
union fza_buffer_txp {
	struct fza_buffer_tx *data_ptr;
	struct fza_buffer_tx __iomem *mmio_ptr;
};

/* Copy a frame of "len" bytes into the RMC transmit ring, split across
 * as many FZA_TX_BUFFER_SIZE fragments as needed, and start the RMC.
 * "smt" selects the MMIO-to-MMIO copy path (SMT frames) over the
 * host-to-MMIO path.  Returns 0 on success or 1 if the ring does not
 * have enough free descriptors for the frame.
 */
static int fza_do_xmit(union fza_buffer_txp ub, int len,
		       struct net_device *dev, int smt)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *rmc_tx_ptr;
	int i, first, frag_len, left_len;
	u32 own, rmc;

	/* Free space check: number of free descriptors (the "- 1" keeps
	 * one descriptor gap between producer and consumer) times the
	 * per-descriptor buffer size must cover the frame.
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < len)
		return 1;

	first = fp->ring_rmc_tx_index;

	left_len = len;
	frag_len = FZA_TX_BUFFER_SIZE;
	/* First descriptor is relinquished last. */
	own = FZA_RING_TX_OWN_HOST;
	/* First descriptor carries frame length; we don't use cut-through. */
	rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
	do {
		i = fp->ring_rmc_tx_index;
		rmc_tx_ptr = &fp->buffer_tx[i];

		if (left_len < FZA_TX_BUFFER_SIZE)
			frag_len = left_len;
		left_len -= frag_len;

		/* Length must be a multiple of 4 as only word writes are
		 * permitted!
		 */
		frag_len = (frag_len + 3) & ~3;
		if (smt)
			fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
		else
			fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);

		if (left_len == 0)
			rmc |= FZA_RING_TX_EOP;		/* Mark last frag. */

		writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
		writel_o(own, &fp->ring_rmc_tx[i].own);

		ub.data_ptr++;
		fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
					fp->ring_rmc_tx_size;

		/* Settings for intermediate frags. */
		own = FZA_RING_TX_OWN_RMC;
		rmc = 0;
	} while (left_len > 0);

	/* Stop the queue if the remaining space cannot hold a maximum-
	 * sized frame; fza_tx() wakes it again once space is reclaimed.
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
	}

	/* Now hand the first descriptor to the RMC, making the whole
	 * frame visible at once.
	 */
	writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);

	/* Go, go, go! */
	writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);

	return 0;
}

/* Forward a received SMT frame (in host memory) to the firmware via the
 * SMT receive ring so the on-board SMT agent can process it.  Returns 0
 * on success or 1 if the current SMT RX descriptor is still owned by
 * the firmware (ring full).
 */
static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
			   u32 rmc, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_rx_ptr;
	u32 own;
	int i;

	i = fp->ring_smt_rx_index;
	own = readl_o(&fp->ring_smt_rx[i].own);
	if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
		return 1;

	smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);

	/* Length must be a multiple of 4 as only word writes are permitted!
	 */
	fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);

	writel_o(rmc, &fp->ring_smt_rx[i].rmc);
	writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);

	fp->ring_smt_rx_index =
		(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;

	/* Grab it! */
	writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);

	return 0;
}

/* Reclaim completed RMC transmit descriptors, updating TX statistics
 * from the completion status of each frame's first (SOP) descriptor,
 * and wake the queue once enough ring space has been freed.  Called
 * from the interrupt handler on a transmit-done event.
 */
static void fza_tx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own, rmc;
	int i;

	while (1) {
		i = fp->ring_rmc_txd_index;
		if (i == fp->ring_rmc_tx_index)
			break;
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
			break;

		rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
		/* Only process the first descriptor. */
		if ((rmc & FZA_RING_TX_SOP) != 0) {
			if ((rmc & FZA_RING_TX_DCC_MASK) ==
			    FZA_RING_TX_DCC_SUCCESS) {
				int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
							/* Omit PRH. */

				fp->stats.tx_packets++;
				fp->stats.tx_bytes += pkt_len;
			} else {
				fp->stats.tx_errors++;
				switch (rmc & FZA_RING_TX_DCC_MASK) {
				case FZA_RING_TX_DCC_DTP_SOP:
				case FZA_RING_TX_DCC_DTP:
				case FZA_RING_TX_DCC_ABORT:
					fp->stats.tx_aborted_errors++;
					break;
				case FZA_RING_TX_DCC_UNDRRUN:
					fp->stats.tx_fifo_errors++;
					break;
				case FZA_RING_TX_DCC_PARITY:
				default:
					break;
				}
			}
		}

		fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
					 fp->ring_rmc_tx_size;
	}

	/* Wake the queue if there is now room for a maximum-sized frame
	 * (mirrors the stop condition in fza_do_xmit()).
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
		if (fp->queue_active) {
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
		}
	}
}

/* Examine the RMC receive status word and the frame-control octet of a
 * received frame; update error statistics accordingly.  Returns nonzero
 * if the frame must be dropped, 0 if it is good.  Certain MAC-level
 * error patterns halt the interface on purpose so that the state-change
 * path triggers a full reset.
 */
static inline int fza_rx_err(struct fza_private *fp,
			     const u32 rmc, const u8 fc)
{
	int len, min_len, max_len;

	len = rmc & FZA_RING_PBC_MASK;

	if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
		fp->stats.rx_errors++;

		/* Check special status codes. */
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
			if (len >= 8190)
				fp->stats.rx_length_errors++;
			return 1;
		}
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		}

		/* Check the MAC status. */
		switch (rmc & FZA_RING_RX_RRR_MASK) {
		case FZA_RING_RX_RRR_OK:
			if ((rmc & FZA_RING_RX_CRC) != 0)
				fp->stats.rx_crc_errors++;
			else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
				 (rmc & FZA_RING_RX_FSB_ERR) != 0)
				fp->stats.rx_frame_errors++;
			return 1;
		case FZA_RING_RX_RRR_SADDR:
		case FZA_RING_RX_RRR_DADDR:
		case FZA_RING_RX_RRR_ABORT:
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		case FZA_RING_RX_RRR_LENGTH:
			fp->stats.rx_frame_errors++;
			return 1;
		default:
			return 1;
		}
	}

	/* Packet received successfully; validate the length.
	 */
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
			min_len = 37;
		else
			min_len = 17;
		break;
	case FDDI_FC_K_FORMAT_LLC:
		min_len = 20;
		break;
	default:
		min_len = 17;
		break;
	}
	max_len = 4495;
	if (len < min_len || len > max_len) {
		fp->stats.rx_errors++;
		fp->stats.rx_length_errors++;
		return 1;
	}

	return 0;
}

/* Process the host receive ring: validate each completed frame, pass
 * good LLC frames to the network stack (and queue SMT frames to the
 * firmware's SMT RX ring), then recycle the descriptor with a fresh
 * 512-byte-aligned DMA buffer.  On buffer-allocation failure the old
 * buffer is reused and the frame dropped.  Called from the interrupt
 * handler on an RX-poll event.
 */
static void fza_rx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct sk_buff *skb, *newskb;
	struct fza_fddihdr *frame;
	dma_addr_t dma, newdma;
	u32 own, rmc, buf;
	int i, len;
	u8 fc;

	while (1) {
		i = fp->ring_hst_rx_index;
		own = readl_o(&fp->ring_hst_rx[i].buf0_own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		rmc = readl_u(&fp->ring_hst_rx[i].rmc);
		skb = fp->rx_skbuff[i];
		dma = fp->rx_dma[i];

		/* The RMC doesn't count the preamble and the starting
		 * delimiter. We fix it up here for a total of 3 octets.
		 */
		dma_rmb();
		len = (rmc & FZA_RING_PBC_MASK) + 3;
		frame = (struct fza_fddihdr *)skb->data;

		/* We need to get at real FC. */
		dma_sync_single_for_cpu(fp->bdev,
					dma +
					((u8 *)&frame->hdr.fc - (u8 *)frame),
					sizeof(frame->hdr.fc),
					DMA_FROM_DEVICE);
		fc = frame->hdr.fc;

		if (fza_rx_err(fp, rmc, fc))
			goto err_rx;

		/* We have to 512-byte-align RX buffers... */
		newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
		if (newskb) {
			fza_skb_align(newskb, 512);
			newdma = dma_map_single(fp->bdev, newskb->data,
						FZA_RX_BUFFER_SIZE,
						DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, newdma)) {
				dev_kfree_skb_irq(newskb);
				newskb = NULL;
			}
		}
		if (newskb) {
			int pkt_len = len - 7;	/* Omit P, SD and FCS. */
			int is_multi;
			int rx_stat;

			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
					 DMA_FROM_DEVICE);

			/* Queue SMT frames to the SMT receive ring. */
			if ((fc & (FDDI_FC_K_CLASS_MASK |
				   FDDI_FC_K_FORMAT_MASK)) ==
			    (FDDI_FC_K_CLASS_ASYNC |
			     FDDI_FC_K_FORMAT_MANAGEMENT) &&
			    (rmc & FZA_RING_RX_DA_MASK) !=
			    FZA_RING_RX_DA_PROM) {
				if (fza_do_recv_smt((struct fza_buffer_tx *)
						    skb->data, len, rmc,
						    dev)) {
					/* SMT ring full -- tell the firmware
					 * about the overflow.
					 */
					writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
						 &fp->regs->control_a);
				}
			}

			is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);

			skb_reserve(skb, 3);	/* Skip over P and SD. */
			skb_put(skb, pkt_len);	/* And cut off FCS. */
			skb->protocol = fddi_type_trans(skb, dev);

			rx_stat = netif_rx(skb);
			if (rx_stat != NET_RX_DROP) {
				fp->stats.rx_packets++;
				fp->stats.rx_bytes += pkt_len;
				if (is_multi)
					fp->stats.multicast++;
			} else {
				fp->stats.rx_dropped++;
			}

			skb = newskb;
			dma = newdma;
			fp->rx_skbuff[i] = skb;
			fp->rx_dma[i] = dma;
		} else {
			fp->stats.rx_dropped++;
			pr_notice("%s: memory squeeze, dropping packet\n",
				  fp->name);
		}

err_rx:
		/* Return the descriptor (with whichever buffer it now
		 * holds) to the firmware.
		 */
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		buf = (dma + 0x1000) >> 9;
		writel_o(buf, &fp->ring_hst_rx[i].buffer1);
		buf = dma >> 9 | FZA_RING_OWN_FZA;
		writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
		fp->ring_hst_rx_index =
			(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
	}
}

/* Process the SMT transmit ring: frames generated by the on-board SMT
 * agent are copied MMIO-to-MMIO onto the RMC transmit ring (and also to
 * any packet taps, via an intermediate skb).  Called from the interrupt
 * handler on an SMT-TX-poll event.
 */
static void fza_tx_smt(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_tx_ptr;
	int i, len;
	u32 own;

	while (1) {
		i = fp->ring_smt_tx_index;
		own = readl_o(&fp->ring_smt_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
		len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;

		if (!netif_queue_stopped(dev)) {
			if (dev_nit_active(dev)) {
				struct fza_buffer_tx *skb_data_ptr;
				struct sk_buff *skb;

				/* Length must be a multiple of 4 as only word
				 * reads are permitted!
				 */
				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
				if (!skb)
					goto err_no_skb;	/* Drop. */

				skb_data_ptr = (struct fza_buffer_tx *)
					       skb->data;

				fza_reads(smt_tx_ptr, skb_data_ptr,
					  (len + 3) & ~3);
				skb->dev = dev;
				skb_reserve(skb, 3);	/* Skip over PRH. */
				skb_put(skb, len - 3);
				skb_reset_network_header(skb);

				dev_queue_xmit_nit(skb, dev);

				dev_kfree_skb_irq(skb);

err_no_skb:
				;
			}

			/* Queue the frame to the RMC transmit ring. */
			fza_do_xmit((union fza_buffer_txp)
				    { .mmio_ptr = smt_tx_ptr },
				    len, dev, 1);
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
	}
}

/* Drain the unsolicited-event ring; the only event acted upon is an RX
 * overflow report, which is folded into the RX error statistics.
 * Called from the interrupt handler on an unsolicited-event poll.
 */
static void fza_uns(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	while (1) {
		i = fp->ring_uns_index;
		own = readl_o(&fp->ring_uns[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
			fp->stats.rx_errors++;
			fp->stats.rx_over_errors++;
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
		fp->ring_uns_index =
			(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
	}
}

/* Handle a transmit-ring flush request from the firmware: return all
 * SMT TX descriptors to the firmware and mark any RMC-owned transmit
 * descriptors to be discarded (DTP), then acknowledge flush completion.
 */
static void fza_tx_flush(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	/* Clean up the SMT TX ring. */
	i = fp->ring_smt_tx_index;
	do {
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;

	} while (i != fp->ring_smt_tx_index);

	/* Clean up the RMC TX ring.
	 */
	i = fp->ring_rmc_tx_index;
	do {
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
			u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);

			/* Ask the RMC to discard this frame (DTP). */
			writel_u(rmc | FZA_RING_TX_DTP,
				 &fp->ring_rmc_tx[i].rmc);
		}
		fp->ring_rmc_tx_index =
			(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;

	} while (i != fp->ring_rmc_tx_index);

	/* Done. */
	writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}

/* Top-level interrupt handler: read and acknowledge the pending event
 * bits, then dispatch to the per-event service routines.  The dispatch
 * order below is deliberate (see comment).  Returns IRQ_NONE when none
 * of the events we have unmasked are pending (shared-IRQ friendly).
 */
static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fza_private *fp = netdev_priv(dev);
	uint int_event;

	/* Get interrupt events. */
	int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
	if (int_event == 0)
		return IRQ_NONE;

	/* Clear the events. */
	writew_u(int_event, &fp->regs->int_event);

	/* Now handle the events. The order matters. */

	/* Command finished interrupt. */
	if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
		fp->irq_count_cmd_done++;

		spin_lock(&fp->lock);
		fp->cmd_done_flag = 1;
		wake_up(&fp->cmd_done_wait);
		spin_unlock(&fp->lock);
	}

	/* Transmit finished interrupt. */
	if ((int_event & FZA_EVENT_TX_DONE) != 0) {
		fp->irq_count_tx_done++;
		fza_tx(dev);
	}

	/* Host receive interrupt. */
	if ((int_event & FZA_EVENT_RX_POLL) != 0) {
		fp->irq_count_rx_poll++;
		fza_rx(dev);
	}

	/* SMT transmit interrupt. */
	if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
		fp->irq_count_smt_tx_poll++;
		fza_tx_smt(dev);
	}

	/* Transmit ring flush request. */
	if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
		fp->irq_count_flush_tx++;
		fza_tx_flush(dev);
	}

	/* Link status change interrupt. */
	if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
		uint status;

		fp->irq_count_link_st_chg++;
		status = readw_u(&fp->regs->status);
		if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
			netif_carrier_on(dev);
			pr_info("%s: link available\n", fp->name);
		} else {
			netif_carrier_off(dev);
			pr_info("%s: link unavailable\n", fp->name);
		}
	}

	/* Unsolicited event interrupt. */
	if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
		fp->irq_count_uns_poll++;
		fza_uns(dev);
	}

	/* State change interrupt: drive the adapter's bring-up state
	 * machine (RESET -> UNINITIALIZED -> INITIALIZED -> RUNNING),
	 * and recover from the HALTED state with a timed reset.
	 */
	if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
		uint status, state;

		fp->irq_count_state_chg++;

		status = readw_u(&fp->regs->status);
		state = FZA_STATUS_GET_STATE(status);
		pr_debug("%s: state change: %x\n", fp->name, state);
		switch (state) {
		case FZA_STATE_RESET:
			break;

		case FZA_STATE_UNINITIALIZED:
			netif_carrier_off(dev);
			del_timer_sync(&fp->reset_timer);
			/* All rings restart from slot 0 after a reset. */
			fp->ring_cmd_index = 0;
			fp->ring_uns_index = 0;
			fp->ring_rmc_tx_index = 0;
			fp->ring_rmc_txd_index = 0;
			fp->ring_hst_rx_index = 0;
			fp->ring_smt_tx_index = 0;
			fp->ring_smt_rx_index = 0;
			if (fp->state > state) {
				pr_info("%s: OK\n", fp->name);
				fza_cmd_send(dev, FZA_RING_CMD_INIT);
			}
			break;

		case FZA_STATE_INITIALIZED:
			if (fp->state > state) {
				fza_set_rx_mode(dev);
				fza_cmd_send(dev, FZA_RING_CMD_PARAM);
			}
			break;

		case FZA_STATE_RUNNING:
		case FZA_STATE_MAINTENANCE:
			fp->state = state;
			fza_rx_init(fp);
			fp->queue_active = 1;
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
			break;

		case FZA_STATE_HALTED:
			fp->queue_active = 0;
			netif_stop_queue(dev);
			pr_debug("%s: queue stopped\n", fp->name);
			del_timer_sync(&fp->reset_timer);
			pr_warn("%s: halted, reason: %x\n", fp->name,
				FZA_STATUS_GET_HALT(status));
			fza_regs_dump(fp);
			pr_info("%s: resetting the board...\n", fp->name);
			fza_do_reset(fp);
			fp->timer_state = 0;
			fp->reset_timer.expires = jiffies + 45 * HZ;
			add_timer(&fp->reset_timer);
			break;

		default:
			pr_warn("%s: undefined state: %x\n", fp->name, state);
			break;
		}

		spin_lock(&fp->lock);
		fp->state_chg_flag = 1;
		wake_up(&fp->state_chg_wait);
		spin_unlock(&fp->lock);
	}

	return IRQ_HANDLED;
}

/* Reset watchdog: if the board failed to come out of reset in time,
 * alternate between asserting the reset (for 1 second) and releasing it
 * (waiting up to 45 seconds), driven by fp->timer_state, until the
 * state-change interrupt cancels the timer.
 */
static void fza_reset_timer(struct timer_list *t)
{
	struct fza_private *fp = from_timer(fp, t, reset_timer);

	if (!fp->timer_state) {
		pr_err("%s: RESET timed out!\n", fp->name);
		pr_info("%s: trying harder...\n", fp->name);

		/* Assert the board reset. */
		writew_o(FZA_RESET_INIT, &fp->regs->reset);
		readw_o(&fp->regs->reset);	/* Synchronize. */

		fp->timer_state = 1;
		fp->reset_timer.expires = jiffies + HZ;
	} else {
		/* Clear the board reset. */
		writew_u(FZA_RESET_CLR, &fp->regs->reset);

		/* Enable all interrupt events we handle. */
		writew_o(fp->int_mask, &fp->regs->int_mask);
		readw_o(&fp->regs->int_mask);	/* Synchronize. */

		fp->timer_state = 0;
		fp->reset_timer.expires = jiffies + 45 * HZ;
	}
	add_timer(&fp->reset_timer);
}

/* The adapter's station address is fixed in hardware; changing it is
 * not supported (ndo_set_mac_address callback).
 */
static int fza_set_mac_address(struct net_device *dev, void *addr)
{
	return -EOPNOTSUPP;
}

/* Queue an LLC frame for transmission (ndo_start_xmit callback): build
 * the 3-octet Packet Request Header (PRH) in front of the frame from
 * its FC octet, then hand the frame to fza_do_xmit() with SMT transmit
 * interrupts masked (SMT frames share the RMC transmit ring).
 */
static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned int old_mask, new_mask;
	int ret;
	u8 fc;

	skb_push(skb, 3);	/* Make room for PRH. */

	/* Decode FC to set PRH.
	 */
	fc = skb->data[3];
	skb->data[0] = 0;
	skb->data[1] = 0;
	skb->data[2] = FZA_PRH2_NORMAL;
	if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
			/* Token. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
		} else {
			/* SMT or MAC. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
		}
		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
		break;
	case FDDI_FC_K_FORMAT_LLC:
	case FDDI_FC_K_FORMAT_FUTURE:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
		break;
	case FDDI_FC_K_FORMAT_IMPLEMENTOR:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
		break;
	}

	/* SMT transmit interrupts may sneak frames into the RMC
	 * transmit ring. We disable them while queueing a frame
	 * to maintain consistency.
	 */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
	fp->int_mask = new_mask;
	ret = fza_do_xmit((union fza_buffer_txp)
			  { .data_ptr = (struct fza_buffer_tx *)skb->data },
			  skb->len, dev, 0);
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	if (ret) {
		/* Probably an SMT packet filled the remaining space,
		 * so just stop the queue, but don't report it as an error.
		 */
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
		fp->stats.tx_dropped++;
	}

	/* NOTE(review): on failure "ret" is 1, which is neither
	 * NETDEV_TX_OK nor NETDEV_TX_BUSY, and the skb has already been
	 * freed -- verify callers treat this return as "consumed".
	 */
	dev_kfree_skb(skb);

	return ret;
}

/* Bring the interface up (ndo_open callback): allocate and map the
 * 512-byte-aligned receive buffers, run the INIT command and then the
 * PARAM command (with the CAM set up in between).  Returns 0 on
 * success or a negative errno; on RX buffer allocation failure all
 * previously allocated buffers are unwound.
 */
static int fza_open(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma;
	int ret, i;
	u32 stat;
	long t;

	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		/* We have to 512-byte-align RX buffers... */
		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
		if (skb) {
			fza_skb_align(skb, 512);
			dma = dma_map_single(fp->bdev, skb->data,
					     FZA_RX_BUFFER_SIZE,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, dma)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (!skb) {
			/* Unwind everything allocated so far. */
			for (--i; i >= 0; i--) {
				dma_unmap_single(fp->bdev, fp->rx_dma[i],
						 FZA_RX_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(fp->rx_skbuff[i]);
				fp->rx_dma[i] = 0;
				fp->rx_skbuff[i] = NULL;
			}
			return -ENOMEM;
		}
		fp->rx_skbuff[i] = skb;
		fp->rx_dma[i] = dma;
	}

	ret = fza_init_send(dev, NULL);
	if (ret != 0)
		return ret;

	/* Purger and Beacon multicasts need to be supplied before PARAM.
	 */
	fza_set_rx_mode(dev);

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		return -ENOBUFS;

	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: PARAM command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	return 0;
}

/* Take the interface down (ndo_close callback): stop the queue and the
 * reset watchdog, then issue SHUT and wait for the adapter to report
 * the shutdown state change.
 */
static int fza_close(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned long flags;
	uint state;
	long t;
	int i;

	netif_stop_queue(dev);
	pr_debug("%s: queue stopped\n", fp->name);

	del_timer_sync(&fp->reset_timer);
	spin_lock_irqsave(&fp->lock, flags);
	fp->state = FZA_STATE_UNINITIALIZED;
	fp->state_chg_flag = 0;
	/* Shut the interface down. */
	writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
	readw_o(&fp->regs->control_a);	/* Synchronize. */
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says SHUT needs up to 10 seconds to complete.
*/ 1239 t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag, 1240 15 * HZ); 1241 state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status)); 1242 if (fp->state_chg_flag == 0) { 1243 pr_err("%s: SHUT timed out!, state %x\n", fp->name, state); 1244 return -EIO; 1245 } 1246 if (state != FZA_STATE_UNINITIALIZED) { 1247 pr_err("%s: SHUT failed!, state %x\n", fp->name, state); 1248 return -EIO; 1249 } 1250 pr_debug("%s: SHUT: %lums elapsed\n", fp->name, 1251 (15 * HZ - t) * 1000 / HZ); 1252 1253 for (i = 0; i < FZA_RING_RX_SIZE; i++) 1254 if (fp->rx_skbuff[i]) { 1255 dma_unmap_single(fp->bdev, fp->rx_dma[i], 1256 FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE); 1257 dev_kfree_skb(fp->rx_skbuff[i]); 1258 fp->rx_dma[i] = 0; 1259 fp->rx_skbuff[i] = NULL; 1260 } 1261 1262 return 0; 1263 } 1264 1265 static struct net_device_stats *fza_get_stats(struct net_device *dev) 1266 { 1267 struct fza_private *fp = netdev_priv(dev); 1268 1269 return &fp->stats; 1270 } 1271 1272 static int fza_probe(struct device *bdev) 1273 { 1274 static const struct net_device_ops netdev_ops = { 1275 .ndo_open = fza_open, 1276 .ndo_stop = fza_close, 1277 .ndo_start_xmit = fza_start_xmit, 1278 .ndo_set_rx_mode = fza_set_rx_mode, 1279 .ndo_set_mac_address = fza_set_mac_address, 1280 .ndo_get_stats = fza_get_stats, 1281 }; 1282 static int version_printed; 1283 char rom_rev[4], fw_rev[4], rmc_rev[4]; 1284 struct tc_dev *tdev = to_tc_dev(bdev); 1285 struct fza_cmd_init __iomem *init; 1286 resource_size_t start, len; 1287 struct net_device *dev; 1288 struct fza_private *fp; 1289 uint smt_ver, pmd_type; 1290 void __iomem *mmio; 1291 uint hw_addr[2]; 1292 int ret, i; 1293 1294 if (!version_printed) { 1295 pr_info("%s", version); 1296 version_printed = 1; 1297 } 1298 1299 dev = alloc_fddidev(sizeof(*fp)); 1300 if (!dev) 1301 return -ENOMEM; 1302 SET_NETDEV_DEV(dev, bdev); 1303 1304 fp = netdev_priv(dev); 1305 dev_set_drvdata(bdev, dev); 1306 1307 fp->bdev = bdev; 1308 fp->name = dev_name(bdev); 1309 1310 /* 
Request the I/O MEM resource. */ 1311 start = tdev->resource.start; 1312 len = tdev->resource.end - start + 1; 1313 if (!request_mem_region(start, len, dev_name(bdev))) { 1314 pr_err("%s: cannot reserve MMIO region\n", fp->name); 1315 ret = -EBUSY; 1316 goto err_out_kfree; 1317 } 1318 1319 /* MMIO mapping setup. */ 1320 mmio = ioremap_nocache(start, len); 1321 if (!mmio) { 1322 pr_err("%s: cannot map MMIO\n", fp->name); 1323 ret = -ENOMEM; 1324 goto err_out_resource; 1325 } 1326 1327 /* Initialize the new device structure. */ 1328 switch (loopback) { 1329 case FZA_LOOP_NORMAL: 1330 case FZA_LOOP_INTERN: 1331 case FZA_LOOP_EXTERN: 1332 break; 1333 default: 1334 loopback = FZA_LOOP_NORMAL; 1335 } 1336 1337 fp->mmio = mmio; 1338 dev->irq = tdev->interrupt; 1339 1340 pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n", 1341 fp->name, (long long)tdev->resource.start, dev->irq); 1342 pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio); 1343 1344 fp->regs = mmio + FZA_REG_BASE; 1345 fp->ring_cmd = mmio + FZA_RING_CMD; 1346 fp->ring_uns = mmio + FZA_RING_UNS; 1347 1348 init_waitqueue_head(&fp->state_chg_wait); 1349 init_waitqueue_head(&fp->cmd_done_wait); 1350 spin_lock_init(&fp->lock); 1351 fp->int_mask = FZA_MASK_NORMAL; 1352 1353 timer_setup(&fp->reset_timer, fza_reset_timer, 0); 1354 1355 /* Sanitize the board. */ 1356 fza_regs_dump(fp); 1357 fza_do_shutdown(fp); 1358 1359 ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev); 1360 if (ret != 0) { 1361 pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq); 1362 goto err_out_map; 1363 } 1364 1365 /* Enable the driver mode. */ 1366 writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b); 1367 1368 /* For some reason transmit done interrupts can trigger during 1369 * reset. This avoids a division error in the handler. 
1370 */ 1371 fp->ring_rmc_tx_size = FZA_RING_TX_SIZE; 1372 1373 ret = fza_reset(fp); 1374 if (ret != 0) 1375 goto err_out_irq; 1376 1377 ret = fza_init_send(dev, &init); 1378 if (ret != 0) 1379 goto err_out_irq; 1380 1381 fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr)); 1382 memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN); 1383 1384 fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev)); 1385 fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev)); 1386 fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev)); 1387 for (i = 3; i >= 0 && rom_rev[i] == ' '; i--) 1388 rom_rev[i] = 0; 1389 for (i = 3; i >= 0 && fw_rev[i] == ' '; i--) 1390 fw_rev[i] = 0; 1391 for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--) 1392 rmc_rev[i] = 0; 1393 1394 fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx); 1395 fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size); 1396 fp->ring_hst_rx = mmio + readl_u(&init->hst_rx); 1397 fp->ring_hst_rx_size = readl_u(&init->hst_rx_size); 1398 fp->ring_smt_tx = mmio + readl_u(&init->smt_tx); 1399 fp->ring_smt_tx_size = readl_u(&init->smt_tx_size); 1400 fp->ring_smt_rx = mmio + readl_u(&init->smt_rx); 1401 fp->ring_smt_rx_size = readl_u(&init->smt_rx_size); 1402 1403 fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx)); 1404 1405 fp->t_max = readl_u(&init->def_t_max); 1406 fp->t_req = readl_u(&init->def_t_req); 1407 fp->tvx = readl_u(&init->def_tvx); 1408 fp->lem_threshold = readl_u(&init->lem_threshold); 1409 fza_reads(&init->def_station_id, &fp->station_id, 1410 sizeof(fp->station_id)); 1411 fp->rtoken_timeout = readl_u(&init->rtoken_timeout); 1412 fp->ring_purger = readl_u(&init->ring_purger); 1413 1414 smt_ver = readl_u(&init->smt_ver); 1415 pmd_type = readl_u(&init->pmd_type); 1416 1417 pr_debug("%s: INIT parameters:\n", fp->name); 1418 pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode)); 1419 pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size)); 1420 pr_debug(" rmc_rev: %.4s\n", rmc_rev); 1421 pr_debug(" rom_rev: %.4s\n", rom_rev); 1422 
pr_debug(" fw_rev: %.4s\n", fw_rev); 1423 pr_debug(" mop_type: %u\n", readl_u(&init->mop_type)); 1424 pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx)); 1425 pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx)); 1426 pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size)); 1427 pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx)); 1428 pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size)); 1429 pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx)); 1430 pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size)); 1431 /* TC systems are always LE, so don't bother swapping. */ 1432 pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 1433 (readl_u(&init->hw_addr[0]) >> 0) & 0xff, 1434 (readl_u(&init->hw_addr[0]) >> 8) & 0xff, 1435 (readl_u(&init->hw_addr[0]) >> 16) & 0xff, 1436 (readl_u(&init->hw_addr[0]) >> 24) & 0xff, 1437 (readl_u(&init->hw_addr[1]) >> 0) & 0xff, 1438 (readl_u(&init->hw_addr[1]) >> 8) & 0xff, 1439 (readl_u(&init->hw_addr[1]) >> 16) & 0xff, 1440 (readl_u(&init->hw_addr[1]) >> 24) & 0xff); 1441 pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req)); 1442 pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx)); 1443 pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max)); 1444 pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold)); 1445 /* Don't bother swapping, see above. 
*/ 1446 pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 1447 (readl_u(&init->def_station_id[0]) >> 0) & 0xff, 1448 (readl_u(&init->def_station_id[0]) >> 8) & 0xff, 1449 (readl_u(&init->def_station_id[0]) >> 16) & 0xff, 1450 (readl_u(&init->def_station_id[0]) >> 24) & 0xff, 1451 (readl_u(&init->def_station_id[1]) >> 0) & 0xff, 1452 (readl_u(&init->def_station_id[1]) >> 8) & 0xff, 1453 (readl_u(&init->def_station_id[1]) >> 16) & 0xff, 1454 (readl_u(&init->def_station_id[1]) >> 24) & 0xff); 1455 pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt)); 1456 pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver)); 1457 pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout)); 1458 pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger)); 1459 pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max)); 1460 pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min)); 1461 pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type)); 1462 1463 pr_info("%s: model %s, address %pMF\n", 1464 fp->name, 1465 pmd_type == FZA_PMD_TYPE_TW ? 1466 "700-C (DEFZA-CA), ThinWire PMD selected" : 1467 pmd_type == FZA_PMD_TYPE_STP ? 1468 "700-C (DEFZA-CA), STP PMD selected" : 1469 "700 (DEFZA-AA), MMF PMD", 1470 dev->dev_addr); 1471 pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, " 1472 "SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver); 1473 1474 /* Now that we fetched initial parameters just shut the interface 1475 * until opened. 1476 */ 1477 ret = fza_close(dev); 1478 if (ret != 0) 1479 goto err_out_irq; 1480 1481 /* The FZA-specific entries in the device structure. 
*/ 1482 dev->netdev_ops = &netdev_ops; 1483 1484 ret = register_netdev(dev); 1485 if (ret != 0) 1486 goto err_out_irq; 1487 1488 pr_info("%s: registered as %s\n", fp->name, dev->name); 1489 fp->name = (const char *)dev->name; 1490 1491 get_device(bdev); 1492 return 0; 1493 1494 err_out_irq: 1495 del_timer_sync(&fp->reset_timer); 1496 fza_do_shutdown(fp); 1497 free_irq(dev->irq, dev); 1498 1499 err_out_map: 1500 iounmap(mmio); 1501 1502 err_out_resource: 1503 release_mem_region(start, len); 1504 1505 err_out_kfree: 1506 free_netdev(dev); 1507 1508 pr_err("%s: initialization failure, aborting!\n", fp->name); 1509 return ret; 1510 } 1511 1512 static int fza_remove(struct device *bdev) 1513 { 1514 struct net_device *dev = dev_get_drvdata(bdev); 1515 struct fza_private *fp = netdev_priv(dev); 1516 struct tc_dev *tdev = to_tc_dev(bdev); 1517 resource_size_t start, len; 1518 1519 put_device(bdev); 1520 1521 unregister_netdev(dev); 1522 1523 del_timer_sync(&fp->reset_timer); 1524 fza_do_shutdown(fp); 1525 free_irq(dev->irq, dev); 1526 1527 iounmap(fp->mmio); 1528 1529 start = tdev->resource.start; 1530 len = tdev->resource.end - start + 1; 1531 release_mem_region(start, len); 1532 1533 free_netdev(dev); 1534 1535 return 0; 1536 } 1537 1538 static struct tc_device_id const fza_tc_table[] = { 1539 { "DEC ", "PMAF-AA " }, 1540 { } 1541 }; 1542 MODULE_DEVICE_TABLE(tc, fza_tc_table); 1543 1544 static struct tc_driver fza_driver = { 1545 .id_table = fza_tc_table, 1546 .driver = { 1547 .name = "defza", 1548 .bus = &tc_bus_type, 1549 .probe = fza_probe, 1550 .remove = fza_remove, 1551 }, 1552 }; 1553 1554 static int fza_init(void) 1555 { 1556 return tc_register_driver(&fza_driver); 1557 } 1558 1559 static void fza_exit(void) 1560 { 1561 tc_unregister_driver(&fza_driver); 1562 } 1563 1564 module_init(fza_init); 1565 module_exit(fza_exit); 1566