// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
 *
 * Copyright (c) 2018 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * References:
 *
 * Dave Sawyer & Phil Weeks & Frank Itkowsky,
 * "DEC FDDIcontroller 700 Port Specification",
 * Revision 1.1, Digital Equipment Corporation
 */

/* ------------------------------------------------------------------------- */
/* FZA configurable parameters. */

/* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024. */
#define FZA_RING_TX_MODE 0

/* The number of receive ring descriptors; from 2 up to 256. */
#define FZA_RING_RX_SIZE 256

/* End of FZA configurable parameters.  No need to change anything below. */
/* ------------------------------------------------------------------------- */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tc.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <asm/barrier.h>

#include "defza.h"

#define DRV_NAME "defza"
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"

static char version[] =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";

MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");

static int loopback;
module_param(loopback, int, 0644);

/* Ring Purger Multicast */
static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
/* Directed Beacon Multicast */
static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };

/* Shorthands for MMIO accesses that we require to be strongly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_o readw_relaxed
#define readl_o readl_relaxed

#define writew_o writew_relaxed
#define writel_o writel_relaxed

/* Shorthands for MMIO accesses that we are happy with being weakly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed

#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed

static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
						unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
					    unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_KERNEL);
}

static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
{
	unsigned long x, y;

	x = (unsigned long)skb->data;
	y = ALIGN(x, v);

	skb_reserve(skb, y - x);
}

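/* Receive buffers must be 512-byte-aligned, because the receive
 * descriptors carry buffer addresses in 512-byte units (dma >> 9, see
 * fza_rx_init() and fza_rx() below).  Callers therefore allocate
 * FZA_RX_BUFFER_SIZE + 511 bytes and call fza_skb_align(skb, 512),
 * which advances skb->data to the next 512-byte boundary by consuming
 * the alignment slack with skb_reserve().
 */
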
static inline void fza_reads(const void __iomem *from, void *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 *dst = to;
		u32 *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			*dst++ = readq_u(src++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 *)dst;
			*dst_trail = readl_u(src_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 *dst = to;

		for (size = (size + 3) / 4; size; size--)
			*dst++ = readl_u(src++);
	}
}

static inline void fza_writes(const void *from, void __iomem *to,
			      unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 *src = from;
		const u32 *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(*src++, dst++);
		if (size) {
			src_trail = (u32 *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(*src_trail, dst_trail);
		}
	} else {
		const u32 *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(*src++, dst++);
	}
}

static inline void fza_moves(const void __iomem *from, void __iomem *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(readq_u(src++), dst++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(readl_u(src_trail), dst_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(readl_u(src++), dst++);
	}
}

static inline void fza_zeros(void __iomem *to, unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(0, dst++);
		if (size) {
			dst_trail = (u32 __iomem *)dst;
			writel_u(0, dst_trail);
		}
	} else {
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(0, dst++);
	}
}

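/* The helpers above copy data to and from the adapter's shared-memory
 * window, which only permits 32-bit (and, on 64-bit hosts, 64-bit)
 * accesses.  "size" is given in bytes and (size + 3) / 4 rounds it up
 * to whole 32-bit words, e.g. 6 bytes -> 2 words; the 64-bit variants
 * consume two words per access and mop up any odd trailing word with a
 * single 32-bit access.
 */
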
static inline void fza_regs_dump(struct fza_private *fp)
{
	pr_debug("%s: iomem registers:\n", fp->name);
	pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
	pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
	pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
	pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
	pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
	pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
}

static inline void fza_do_reset(struct fza_private *fp)
{
	/* Reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	readw_o(&fp->regs->reset);	/* Read it back for a small delay. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);

	/* Enable all interrupt events we handle. */
	writew_o(fp->int_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
}

static inline void fza_do_shutdown(struct fza_private *fp)
{
	/* Disable the driver mode. */
	writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);

	/* And reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
}

static int fza_reset(struct fza_private *fp)
{
	unsigned long flags;
	uint status, state;
	long t;

	pr_info("%s: resetting the board...\n", fp->name);

	spin_lock_irqsave(&fp->lock, flags);
	fp->state_chg_flag = 0;
	fza_do_reset(fp);
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says RESET needs up to 30 seconds to complete.  My DEFZA-AA
	 * rev. C03 happily finishes in 9.7 seconds. :-)  But we need to
	 * be on the safe side...
	 */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       45 * HZ);
	status = readw_u(&fp->regs->status);
	state = FZA_STATUS_GET_STATE(status);
	if (fp->state_chg_flag == 0) {
		pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: RESET failed!, state %x, failure ID %x\n",
		       fp->name, state, FZA_STATUS_GET_TEST(status));
		return -EIO;
	}
	pr_info("%s: OK\n", fp->name);
	pr_debug("%s: RESET: %lums elapsed\n", fp->name,
		 (45 * HZ - t) * 1000 / HZ);

	return 0;
}

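/* Submit a command to the adapter via the command ring: fill in the
 * ring entry's buffer as the command requires, hand the descriptor
 * over to the firmware (FZA_RING_OWN_FZA | command) and poke CMD_POLL.
 * The firmware flips the ownership back and raises a CMD_DONE
 * interrupt once it has processed the command.  State-change
 * interrupts are masked for the duration of the submission.
 */
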
static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
						 int command)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
	unsigned int old_mask, new_mask;
	union fza_cmd_buf __iomem *buf;
	struct netdev_hw_addr *ha;
	int i;

	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_STATE_CHG;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
	fp->int_mask = new_mask;

	buf = fp->mmio + readl_u(&ring->buffer);

	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
	    FZA_RING_OWN_HOST) {
		pr_warn("%s: command buffer full, command: %u!\n", fp->name,
			command);
		return NULL;
	}

	switch (command) {
	case FZA_RING_CMD_INIT:
		writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
		writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
		fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
		break;

	case FZA_RING_CMD_MODCAM:
		i = 0;
		fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= FZA_CMD_CAM_SIZE)
				break;
			fza_writes(ha->addr, &buf->cam.hw_addr[i++],
				   sizeof(*buf->cam.hw_addr));
		}
		while (i < FZA_CMD_CAM_SIZE)
			fza_zeros(&buf->cam.hw_addr[i++],
				  sizeof(*buf->cam.hw_addr));
		break;

	case FZA_RING_CMD_PARAM:
		writel_u(loopback, &buf->param.loop_mode);
		writel_u(fp->t_max, &buf->param.t_max);
		writel_u(fp->t_req, &buf->param.t_req);
		writel_u(fp->tvx, &buf->param.tvx);
		writel_u(fp->lem_threshold, &buf->param.lem_threshold);
		fza_writes(&fp->station_id, &buf->param.station_id,
			   sizeof(buf->param.station_id));
		/* Convert to milliseconds due to buggy firmware. */
		writel_u(fp->rtoken_timeout / 12500,
			 &buf->param.rtoken_timeout);
		writel_u(fp->ring_purger, &buf->param.ring_purger);
		break;

	case FZA_RING_CMD_MODPROM:
		if (dev->flags & IFF_PROMISC) {
			writel_u(1, &buf->modprom.llc_prom);
			writel_u(1, &buf->modprom.smt_prom);
		} else {
			writel_u(0, &buf->modprom.llc_prom);
			writel_u(0, &buf->modprom.smt_prom);
		}
		if (dev->flags & IFF_ALLMULTI ||
		    netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
			writel_u(1, &buf->modprom.llc_multi);
		else
			writel_u(0, &buf->modprom.llc_multi);
		writel_u(1, &buf->modprom.llc_bcast);
		break;
	}

	/* Trigger the command. */
	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
	writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;

	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	return ring;
}

static int fza_init_send(struct net_device *dev,
			 struct fza_cmd_init *__iomem *init)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	unsigned long flags;
	u32 stat;
	long t;

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		/* This should never happen in the uninitialized state,
		 * so do not try to recover and just consider it fatal.
		 */
		return -ENOBUFS;

	/* INIT may take quite a long time (160ms for my C03). */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: INIT command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: INIT command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: INIT: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	if (init)
		*init = fp->mmio + readl_u(&ring->buffer);
	return 0;
}

static void fza_rx_init(struct fza_private *fp)
{
	int i;

	/* Fill the host receive descriptor ring. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		writel_o((fp->rx_dma[i] + 0x1000) >> 9,
			 &fp->ring_hst_rx[i].buffer1);
		writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
			 &fp->ring_hst_rx[i].buf0_own);
	}
}

static void fza_set_rx_mode(struct net_device *dev)
{
	fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
	fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}

union fza_buffer_txp {
	struct fza_buffer_tx *data_ptr;
	struct fza_buffer_tx __iomem *mmio_ptr;
};

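/* Free space in the RMC transmit ring, in bytes, is the number of
 * descriptors strictly between the consumer (ring_rmc_txd_index) and
 * the producer (ring_rmc_tx_index), times FZA_TX_BUFFER_SIZE; one
 * descriptor is kept vacant so that a completely full ring can be told
 * apart from an empty one.  Hence the recurring expression below:
 *
 *	(((txd_index - 1 + size) - tx_index) % size) * FZA_TX_BUFFER_SIZE
 *
 * e.g. with both indices equal (an idle ring) it yields
 * (size - 1) * FZA_TX_BUFFER_SIZE bytes of usable space.
 */
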
static int fza_do_xmit(union fza_buffer_txp ub, int len,
		       struct net_device *dev, int smt)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *rmc_tx_ptr;
	int i, first, frag_len, left_len;
	u32 own, rmc;

	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < len)
		return 1;

	first = fp->ring_rmc_tx_index;

	left_len = len;
	frag_len = FZA_TX_BUFFER_SIZE;
	/* First descriptor is relinquished last. */
	own = FZA_RING_TX_OWN_HOST;
	/* First descriptor carries frame length; we don't use cut-through. */
	rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
	do {
		i = fp->ring_rmc_tx_index;
		rmc_tx_ptr = &fp->buffer_tx[i];

		if (left_len < FZA_TX_BUFFER_SIZE)
			frag_len = left_len;
		left_len -= frag_len;

		/* Length must be a multiple of 4 as only word writes are
		 * permitted!
		 */
		frag_len = (frag_len + 3) & ~3;
		if (smt)
			fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
		else
			fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);

		if (left_len == 0)
			rmc |= FZA_RING_TX_EOP;	/* Mark last frag. */

		writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
		writel_o(own, &fp->ring_rmc_tx[i].own);

		ub.data_ptr++;
		fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
					fp->ring_rmc_tx_size;

		/* Settings for intermediate frags. */
		own = FZA_RING_TX_OWN_RMC;
		rmc = 0;
	} while (left_len > 0);

	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
	}

	writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);

	/* Go, go, go! */
	writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);

	return 0;
}

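/* Hand an SMT frame received by the host over to the adapter's SMT
 * processing: copy it into the buffer of the next SMT RX ring entry,
 * return the entry's ownership to the firmware and poke SMT_RX_POLL.
 * Returns nonzero if the ring is full and the frame cannot be queued.
 */
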
static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
			   u32 rmc, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_rx_ptr;
	u32 own;
	int i;

	i = fp->ring_smt_rx_index;
	own = readl_o(&fp->ring_smt_rx[i].own);
	if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
		return 1;

	smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);

	/* Length must be a multiple of 4 as only word writes are permitted! */
	fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);

	writel_o(rmc, &fp->ring_smt_rx[i].rmc);
	writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);

	fp->ring_smt_rx_index =
		(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;

	/* Grab it! */
	writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);

	return 0;
}

static void fza_tx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own, rmc;
	int i;

	while (1) {
		i = fp->ring_rmc_txd_index;
		if (i == fp->ring_rmc_tx_index)
			break;
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
			break;

		rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
		/* Only process the first descriptor. */
		if ((rmc & FZA_RING_TX_SOP) != 0) {
			if ((rmc & FZA_RING_TX_DCC_MASK) ==
			    FZA_RING_TX_DCC_SUCCESS) {
				int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
						/* Omit PRH. */

				fp->stats.tx_packets++;
				fp->stats.tx_bytes += pkt_len;
			} else {
				fp->stats.tx_errors++;
				switch (rmc & FZA_RING_TX_DCC_MASK) {
				case FZA_RING_TX_DCC_DTP_SOP:
				case FZA_RING_TX_DCC_DTP:
				case FZA_RING_TX_DCC_ABORT:
					fp->stats.tx_aborted_errors++;
					break;
				case FZA_RING_TX_DCC_UNDRRUN:
					fp->stats.tx_fifo_errors++;
					break;
				case FZA_RING_TX_DCC_PARITY:
				default:
					break;
				}
			}
		}

		fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
					 fp->ring_rmc_tx_size;
	}

	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
		if (fp->queue_active) {
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
		}
	}
}

static inline int fza_rx_err(struct fza_private *fp,
			     const u32 rmc, const u8 fc)
{
	int len, min_len, max_len;

	len = rmc & FZA_RING_PBC_MASK;

	if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
		fp->stats.rx_errors++;

		/* Check special status codes. */
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
			if (len >= 8190)
				fp->stats.rx_length_errors++;
			return 1;
		}
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		    (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		     FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		}

		/* Check the MAC status. */
		switch (rmc & FZA_RING_RX_RRR_MASK) {
		case FZA_RING_RX_RRR_OK:
			if ((rmc & FZA_RING_RX_CRC) != 0)
				fp->stats.rx_crc_errors++;
			else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
				 (rmc & FZA_RING_RX_FSB_ERR) != 0)
				fp->stats.rx_frame_errors++;
			return 1;
		case FZA_RING_RX_RRR_SADDR:
		case FZA_RING_RX_RRR_DADDR:
		case FZA_RING_RX_RRR_ABORT:
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		case FZA_RING_RX_RRR_LENGTH:
			fp->stats.rx_frame_errors++;
			return 1;
		default:
			return 1;
		}
	}

	/* Packet received successfully; validate the length. */
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
			min_len = 37;
		else
			min_len = 17;
		break;
	case FDDI_FC_K_FORMAT_LLC:
		min_len = 20;
		break;
	default:
		min_len = 17;
		break;
	}
	max_len = 4495;
	if (len < min_len || len > max_len) {
		fp->stats.rx_errors++;
		fp->stats.rx_length_errors++;
		return 1;
	}

	return 0;
}

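/* Host receive descriptors carry their buffer addresses in 512-byte
 * units: buf0_own holds the base of the buffer (dma >> 9) along with
 * the ownership bits and buffer1 points 4KiB further into the same
 * buffer ((dma + 0x1000) >> 9), presumably so the RMC can cross a page
 * boundary within a single receive buffer.
 */
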
static void fza_rx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct sk_buff *skb, *newskb;
	struct fza_fddihdr *frame;
	dma_addr_t dma, newdma;
	u32 own, rmc, buf;
	int i, len;
	u8 fc;

	while (1) {
		i = fp->ring_hst_rx_index;
		own = readl_o(&fp->ring_hst_rx[i].buf0_own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		rmc = readl_u(&fp->ring_hst_rx[i].rmc);
		skb = fp->rx_skbuff[i];
		dma = fp->rx_dma[i];

		/* The RMC doesn't count the preamble and the starting
		 * delimiter.  We fix it up here for a total of 3 octets.
		 */
		dma_rmb();
		len = (rmc & FZA_RING_PBC_MASK) + 3;
		frame = (struct fza_fddihdr *)skb->data;

		/* We need to get at real FC. */
		dma_sync_single_for_cpu(fp->bdev,
					dma +
					((u8 *)&frame->hdr.fc - (u8 *)frame),
					sizeof(frame->hdr.fc),
					DMA_FROM_DEVICE);
		fc = frame->hdr.fc;

		if (fza_rx_err(fp, rmc, fc))
			goto err_rx;

		/* We have to 512-byte-align RX buffers... */
		newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
		if (newskb) {
			fza_skb_align(newskb, 512);
			newdma = dma_map_single(fp->bdev, newskb->data,
						FZA_RX_BUFFER_SIZE,
						DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, newdma)) {
				dev_kfree_skb_irq(newskb);
				newskb = NULL;
			}
		}
		if (newskb) {
			int pkt_len = len - 7;	/* Omit P, SD and FCS. */
			int is_multi;
			int rx_stat;

			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
					 DMA_FROM_DEVICE);

			/* Queue SMT frames to the SMT receive ring. */
			if ((fc & (FDDI_FC_K_CLASS_MASK |
				   FDDI_FC_K_FORMAT_MASK)) ==
			    (FDDI_FC_K_CLASS_ASYNC |
			     FDDI_FC_K_FORMAT_MANAGEMENT) &&
			    (rmc & FZA_RING_RX_DA_MASK) !=
			    FZA_RING_RX_DA_PROM) {
				if (fza_do_recv_smt((struct fza_buffer_tx *)
						    skb->data, len, rmc,
						    dev)) {
					writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
						 &fp->regs->control_a);
				}
			}

			is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);

			skb_reserve(skb, 3);	/* Skip over P and SD. */
			skb_put(skb, pkt_len);	/* And cut off FCS. */
			skb->protocol = fddi_type_trans(skb, dev);

			rx_stat = netif_rx(skb);
			if (rx_stat != NET_RX_DROP) {
				fp->stats.rx_packets++;
				fp->stats.rx_bytes += pkt_len;
				if (is_multi)
					fp->stats.multicast++;
			} else {
				fp->stats.rx_dropped++;
			}

			skb = newskb;
			dma = newdma;
			fp->rx_skbuff[i] = skb;
			fp->rx_dma[i] = dma;
		} else {
			fp->stats.rx_dropped++;
			pr_notice("%s: memory squeeze, dropping packet\n",
				  fp->name);
		}

err_rx:
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		buf = (dma + 0x1000) >> 9;
		writel_o(buf, &fp->ring_hst_rx[i].buffer1);
		buf = dma >> 9 | FZA_RING_OWN_FZA;
		writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
		fp->ring_hst_rx_index =
			(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
	}
}

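/* Forward SMT frames supplied by the adapter's SMT processing to the
 * network: for each entry handed over on the SMT TX ring, give a copy
 * to any taps listening (dev_queue_xmit_nit) and queue the frame
 * itself to the RMC transmit ring, then return the entry to the
 * firmware.
 */
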
static void fza_tx_smt(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
	int i, len;
	u32 own;

	while (1) {
		i = fp->ring_smt_tx_index;
		own = readl_o(&fp->ring_smt_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
		len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;

		if (!netif_queue_stopped(dev)) {
			if (dev_nit_active(dev)) {
				struct sk_buff *skb;

				/* Length must be a multiple of 4 as only word
				 * reads are permitted!
				 */
				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
				if (!skb)
					goto err_no_skb;	/* Drop. */

				skb_data_ptr = (struct fza_buffer_tx *)
					       skb->data;

				fza_reads(smt_tx_ptr, skb_data_ptr,
					  (len + 3) & ~3);
				skb->dev = dev;
				skb_reserve(skb, 3);	/* Skip over PRH. */
				skb_put(skb, len - 3);
				skb_reset_network_header(skb);

				dev_queue_xmit_nit(skb, dev);

				dev_kfree_skb_irq(skb);

err_no_skb:
				;
			}

			/* Queue the frame to the RMC transmit ring. */
			fza_do_xmit((union fza_buffer_txp)
					{ .mmio_ptr = smt_tx_ptr },
				    len, dev, 1);
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
	}
}

static void fza_uns(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	while (1) {
		i = fp->ring_uns_index;
		own = readl_o(&fp->ring_uns[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
			fp->stats.rx_errors++;
			fp->stats.rx_over_errors++;
		}

		writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
		fp->ring_uns_index =
			(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
	}
}

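/* Handle a transmit ring flush request from the firmware: return all
 * posted SMT TX entries and tag every RMC TX descriptor still owned by
 * the RMC with FZA_RING_TX_DTP, presumably making the RMC drain the
 * ring without transmitting those frames, then acknowledge with
 * FLUSH_DONE.
 */
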
static void fza_tx_flush(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	/* Clean up the SMT TX ring. */
	i = fp->ring_smt_tx_index;
	do {
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;

	} while (i != fp->ring_smt_tx_index);

	/* Clean up the RMC TX ring. */
	i = fp->ring_rmc_tx_index;
	do {
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
			u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);

			writel_u(rmc | FZA_RING_TX_DTP,
				 &fp->ring_rmc_tx[i].rmc);
		}
		fp->ring_rmc_tx_index =
			(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;

	} while (i != fp->ring_rmc_tx_index);

	/* Done. */
	writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}

static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fza_private *fp = netdev_priv(dev);
	uint int_event;

	/* Get interrupt events. */
	int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
	if (int_event == 0)
		return IRQ_NONE;

	/* Clear the events. */
	writew_u(int_event, &fp->regs->int_event);

	/* Now handle the events.  The order matters. */

	/* Command finished interrupt. */
	if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
		fp->irq_count_cmd_done++;

		spin_lock(&fp->lock);
		fp->cmd_done_flag = 1;
		wake_up(&fp->cmd_done_wait);
		spin_unlock(&fp->lock);
	}

	/* Transmit finished interrupt. */
	if ((int_event & FZA_EVENT_TX_DONE) != 0) {
		fp->irq_count_tx_done++;
		fza_tx(dev);
	}

	/* Host receive interrupt. */
	if ((int_event & FZA_EVENT_RX_POLL) != 0) {
		fp->irq_count_rx_poll++;
		fza_rx(dev);
	}

	/* SMT transmit interrupt. */
	if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
		fp->irq_count_smt_tx_poll++;
		fza_tx_smt(dev);
	}

	/* Transmit ring flush request. */
	if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
		fp->irq_count_flush_tx++;
		fza_tx_flush(dev);
	}

	/* Link status change interrupt. */
	if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
		uint status;

		fp->irq_count_link_st_chg++;
		status = readw_u(&fp->regs->status);
		if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
			netif_carrier_on(dev);
			pr_info("%s: link available\n", fp->name);
		} else {
			netif_carrier_off(dev);
			pr_info("%s: link unavailable\n", fp->name);
		}
	}

	/* Unsolicited event interrupt. */
	if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
		fp->irq_count_uns_poll++;
		fza_uns(dev);
	}

	/* State change interrupt. */
	if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
		uint status, state;

		fp->irq_count_state_chg++;

		status = readw_u(&fp->regs->status);
		state = FZA_STATUS_GET_STATE(status);
		pr_debug("%s: state change: %x\n", fp->name, state);
		switch (state) {
		case FZA_STATE_RESET:
			break;

		case FZA_STATE_UNINITIALIZED:
			netif_carrier_off(dev);
			del_timer_sync(&fp->reset_timer);
			fp->ring_cmd_index = 0;
			fp->ring_uns_index = 0;
			fp->ring_rmc_tx_index = 0;
			fp->ring_rmc_txd_index = 0;
			fp->ring_hst_rx_index = 0;
			fp->ring_smt_tx_index = 0;
			fp->ring_smt_rx_index = 0;
			if (fp->state > state) {
				pr_info("%s: OK\n", fp->name);
				fza_cmd_send(dev, FZA_RING_CMD_INIT);
			}
			break;

		case FZA_STATE_INITIALIZED:
			if (fp->state > state) {
				fza_set_rx_mode(dev);
				fza_cmd_send(dev, FZA_RING_CMD_PARAM);
			}
			break;

		case FZA_STATE_RUNNING:
		case FZA_STATE_MAINTENANCE:
			fp->state = state;
			fza_rx_init(fp);
			fp->queue_active = 1;
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
			break;

		case FZA_STATE_HALTED:
			fp->queue_active = 0;
			netif_stop_queue(dev);
			pr_debug("%s: queue stopped\n", fp->name);
			del_timer_sync(&fp->reset_timer);
			pr_warn("%s: halted, reason: %x\n", fp->name,
				FZA_STATUS_GET_HALT(status));
			fza_regs_dump(fp);
			pr_info("%s: resetting the board...\n", fp->name);
			fza_do_reset(fp);
			fp->timer_state = 0;
			fp->reset_timer.expires = jiffies + 45 * HZ;
			add_timer(&fp->reset_timer);
			break;

		default:
			pr_warn("%s: undefined state: %x\n", fp->name, state);
			break;
		}

		spin_lock(&fp->lock);
		fp->state_chg_flag = 1;
		wake_up(&fp->state_chg_wait);
		spin_unlock(&fp->lock);
	}

	return IRQ_HANDLED;
}

static void fza_reset_timer(struct timer_list *t)
{
	struct fza_private *fp = from_timer(fp, t, reset_timer);

	if (!fp->timer_state) {
		pr_err("%s: RESET timed out!\n", fp->name);
		pr_info("%s: trying harder...\n", fp->name);

		/* Assert the board reset. */
		writew_o(FZA_RESET_INIT, &fp->regs->reset);
		readw_o(&fp->regs->reset);	/* Synchronize. */

		fp->timer_state = 1;
		fp->reset_timer.expires = jiffies + HZ;
	} else {
		/* Clear the board reset. */
		writew_u(FZA_RESET_CLR, &fp->regs->reset);

		/* Enable all interrupt events we handle. */
		writew_o(fp->int_mask, &fp->regs->int_mask);
		readw_o(&fp->regs->int_mask);	/* Synchronize. */

		fp->timer_state = 0;
		fp->reset_timer.expires = jiffies + 45 * HZ;
	}
	add_timer(&fp->reset_timer);
}

static int fza_set_mac_address(struct net_device *dev, void *addr)
{
	return -EOPNOTSUPP;
}

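/* Queue a frame for transmission.  The adapter expects a 3-byte PRH in
 * front of the frame data, so make room for it with skb_push() and
 * fill it in from the Frame Control octet: the token type, token
 * handling and CRC handling go into PRH0/PRH1, while PRH2 takes the
 * normal request value.
 */
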
static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned int old_mask, new_mask;
	int ret;
	u8 fc;

	skb_push(skb, 3);	/* Make room for PRH. */

	/* Decode FC to set PRH. */
	fc = skb->data[3];
	skb->data[0] = 0;
	skb->data[1] = 0;
	skb->data[2] = FZA_PRH2_NORMAL;
	if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
			/* Token. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
		} else {
			/* SMT or MAC. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
		}
		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
		break;
	case FDDI_FC_K_FORMAT_LLC:
	case FDDI_FC_K_FORMAT_FUTURE:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
		break;
	case FDDI_FC_K_FORMAT_IMPLEMENTOR:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
		break;
	}

	/* SMT transmit interrupts may sneak frames into the RMC
	 * transmit ring.  We disable them while queueing a frame
	 * to maintain consistency.
	 */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
	fp->int_mask = new_mask;
	ret = fza_do_xmit((union fza_buffer_txp)
				{ .data_ptr = (struct fza_buffer_tx *)
					      skb->data },
			  skb->len, dev, 0);
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	if (ret) {
		/* Probably an SMT packet filled the remaining space,
		 * so just stop the queue, but don't report it as an error.
		 */
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
		fp->stats.tx_dropped++;
	}

	dev_kfree_skb(skb);

	return ret;
}

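/* Bring the interface up: allocate and map the 512-byte-aligned
 * receive buffers, then walk the adapter through its start-up sequence
 * with the INIT command, the CAM and promiscuity settings, and finally
 * PARAM; the transition to the RUNNING state is then completed by the
 * state-change interrupt handling in fza_interrupt().
 */
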
static int fza_open(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma;
	int ret, i;
	u32 stat;
	long t;

	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		/* We have to 512-byte-align RX buffers... */
		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
		if (skb) {
			fza_skb_align(skb, 512);
			dma = dma_map_single(fp->bdev, skb->data,
					     FZA_RX_BUFFER_SIZE,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, dma)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (!skb) {
			for (--i; i >= 0; i--) {
				dma_unmap_single(fp->bdev, fp->rx_dma[i],
						 FZA_RX_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(fp->rx_skbuff[i]);
				fp->rx_dma[i] = 0;
				fp->rx_skbuff[i] = NULL;
			}
			return -ENOMEM;
		}
		fp->rx_skbuff[i] = skb;
		fp->rx_dma[i] = dma;
	}

	ret = fza_init_send(dev, NULL);
	if (ret != 0)
		return ret;

	/* Purger and Beacon multicasts need to be supplied before PARAM. */
	fza_set_rx_mode(dev);

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		return -ENOBUFS;

	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: PARAM command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	return 0;
}

static int fza_close(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned long flags;
	uint state;
	long t;
	int i;

	netif_stop_queue(dev);
	pr_debug("%s: queue stopped\n", fp->name);

	del_timer_sync(&fp->reset_timer);
	spin_lock_irqsave(&fp->lock, flags);
	fp->state = FZA_STATE_UNINITIALIZED;
	fp->state_chg_flag = 0;
	/* Shut the interface down. */
	writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
	readw_o(&fp->regs->control_a);	/* Synchronize. */
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says SHUT needs up to 10 seconds to complete. */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       15 * HZ);
	state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
	if (fp->state_chg_flag == 0) {
		pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
		return -EIO;
	}
	pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
		 (15 * HZ - t) * 1000 / HZ);

	for (i = 0; i < FZA_RING_RX_SIZE; i++)
		if (fp->rx_skbuff[i]) {
			dma_unmap_single(fp->bdev, fp->rx_dma[i],
					 FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(fp->rx_skbuff[i]);
			fp->rx_dma[i] = 0;
			fp->rx_skbuff[i] = NULL;
		}

	return 0;
}

static struct net_device_stats *fza_get_stats(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);

	return &fp->stats;
}

static int fza_probe(struct device *bdev)
{
	static const struct net_device_ops netdev_ops = {
		.ndo_open = fza_open,
		.ndo_stop = fza_close,
		.ndo_start_xmit = fza_start_xmit,
		.ndo_set_rx_mode = fza_set_rx_mode,
		.ndo_set_mac_address = fza_set_mac_address,
		.ndo_get_stats = fza_get_stats,
	};
	static int version_printed;
	char rom_rev[4], fw_rev[4], rmc_rev[4];
	struct tc_dev *tdev = to_tc_dev(bdev);
	struct fza_cmd_init __iomem *init;
	resource_size_t start, len;
	struct net_device *dev;
	struct fza_private *fp;
	uint smt_ver, pmd_type;
	void __iomem *mmio;
	uint hw_addr[2];
	int ret, i;

	if (!version_printed) {
		pr_info("%s", version);
		version_printed = 1;
	}

	dev = alloc_fddidev(sizeof(*fp));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, bdev);

	fp = netdev_priv(dev);
	dev_set_drvdata(bdev, dev);

	fp->bdev = bdev;
	fp->name = dev_name(bdev);

	/* Request the I/O MEM resource. */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(bdev))) {
		pr_err("%s: cannot reserve MMIO region\n", fp->name);
		ret = -EBUSY;
		goto err_out_kfree;
	}

	/* MMIO mapping setup. */
	mmio = ioremap_nocache(start, len);
	if (!mmio) {
		pr_err("%s: cannot map MMIO\n", fp->name);
		ret = -ENOMEM;
		goto err_out_resource;
	}

	/* Initialize the new device structure. */
	switch (loopback) {
	case FZA_LOOP_NORMAL:
	case FZA_LOOP_INTERN:
	case FZA_LOOP_EXTERN:
		break;
	default:
		loopback = FZA_LOOP_NORMAL;
	}

	fp->mmio = mmio;
	dev->irq = tdev->interrupt;

	pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
		fp->name, (long long)tdev->resource.start, dev->irq);
	pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);

	fp->regs = mmio + FZA_REG_BASE;
	fp->ring_cmd = mmio + FZA_RING_CMD;
	fp->ring_uns = mmio + FZA_RING_UNS;

	init_waitqueue_head(&fp->state_chg_wait);
	init_waitqueue_head(&fp->cmd_done_wait);
	spin_lock_init(&fp->lock);
	fp->int_mask = FZA_MASK_NORMAL;

	timer_setup(&fp->reset_timer, fza_reset_timer, 0);

	/* Sanitize the board. */
	fza_regs_dump(fp);
	fza_do_shutdown(fp);

	ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
	if (ret != 0) {
		pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
		goto err_out_map;
	}

	/* Enable the driver mode. */
	writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);

	/* For some reason transmit done interrupts can trigger during
	 * reset.  This avoids a division error in the handler.
	 */
	fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;

	ret = fza_reset(fp);
	if (ret != 0)
		goto err_out_irq;

	ret = fza_init_send(dev, &init);
	if (ret != 0)
		goto err_out_irq;

	fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
	memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);

	fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
	fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
	fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
	for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
		rom_rev[i] = 0;
	for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
		fw_rev[i] = 0;
	for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
		rmc_rev[i] = 0;

	fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
	fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
	fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
	fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
	fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
	fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
	fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
	fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);

	fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));

	fp->t_max = readl_u(&init->def_t_max);
	fp->t_req = readl_u(&init->def_t_req);
	fp->tvx = readl_u(&init->def_tvx);
	fp->lem_threshold = readl_u(&init->lem_threshold);
	fza_reads(&init->def_station_id, &fp->station_id,
		  sizeof(fp->station_id));
	fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
	fp->ring_purger = readl_u(&init->ring_purger);

	smt_ver = readl_u(&init->smt_ver);
	pmd_type = readl_u(&init->pmd_type);

	pr_debug("%s: INIT parameters:\n", fp->name);
	pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
	pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
	pr_debug(" rmc_rev: %.4s\n", rmc_rev);
	pr_debug(" rom_rev: %.4s\n", rom_rev);
	pr_debug(" fw_rev: %.4s\n", fw_rev);
	pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
	pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
	pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
	pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
	pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
	pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
	pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
	pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
	/* TC systems are always LE, so don't bother swapping. */
	pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
	pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
	pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
	pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
	pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
	/* Don't bother swapping, see above. */
	pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
	pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
	pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
	pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
	pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
	pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
	pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
	pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));

	pr_info("%s: model %s, address %pMF\n",
		fp->name,
		pmd_type == FZA_PMD_TYPE_TW ?
			"700-C (DEFZA-CA), ThinWire PMD selected" :
			pmd_type == FZA_PMD_TYPE_STP ?
				"700-C (DEFZA-CA), STP PMD selected" :
				"700 (DEFZA-AA), MMF PMD",
		dev->dev_addr);
	pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
		"SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);

	/* Now that we fetched initial parameters just shut the interface
	 * until opened.
	 */
	ret = fza_close(dev);
	if (ret != 0)
		goto err_out_irq;

	/* The FZA-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;

	ret = register_netdev(dev);
	if (ret != 0)
		goto err_out_irq;

	pr_info("%s: registered as %s\n", fp->name, dev->name);
	fp->name = (const char *)dev->name;

	get_device(bdev);
	return 0;

err_out_irq:
	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

err_out_map:
	iounmap(mmio);

err_out_resource:
	release_mem_region(start, len);

err_out_kfree:
	/* Print before free_netdev(): fp lives inside the netdev. */
	pr_err("%s: initialization failure, aborting!\n", fp->name);
	free_netdev(dev);

	return ret;
}

static int fza_remove(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	struct fza_private *fp = netdev_priv(dev);
	struct tc_dev *tdev = to_tc_dev(bdev);
	resource_size_t start, len;

	put_device(bdev);

	unregister_netdev(dev);

	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

	iounmap(fp->mmio);

	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);

	free_netdev(dev);

	return 0;
}

static struct tc_device_id const fza_tc_table[] = {
	{ "DEC     ", "PMAF-AA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);

static struct tc_driver fza_driver = {
	.id_table = fza_tc_table,
	.driver = {
		.name = "defza",
		.bus = &tc_bus_type,
		.probe = fza_probe,
		.remove = fza_remove,
	},
};

static int fza_init(void)
{
	return tc_register_driver(&fza_driver);
}

static void fza_exit(void)
{
	tc_unregister_driver(&fza_driver);
}

module_init(fza_init);
module_exit(fza_exit);