// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hitachi SCA HD64570 driver for Linux
 *
 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Source of information: Hitachi HD64570 SCA User's Manual
 *
 * We use the following SCA memory map:
 *
 * Packet buffer descriptor rings - starting from winbase or win0base:
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
 *
 * Packet data buffers - starting from winbase + buff_offset:
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "hd64570.h"

#define get_msci(port)    (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)

#define SCA_INTR_MSCI(node)    (node ? 0x10 : 0x01)
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)

static inline struct net_device *port_to_dev(port_t *port)
{
        return port->dev;
}

static inline int sca_intr_status(card_t *card)
{
        u8 result = 0;
        u8 isr0 = sca_in(ISR0, card);
        u8 isr1 = sca_in(ISR1, card);

        if (isr1 & 0x03)
                result |= SCA_INTR_DMAC_RX(0);
        if (isr1 & 0x0C)
                result |= SCA_INTR_DMAC_TX(0);
        if (isr1 & 0x30)
                result |= SCA_INTR_DMAC_RX(1);
        if (isr1 & 0xC0)
                result |= SCA_INTR_DMAC_TX(1);
        if (isr0 & 0x0F)
                result |= SCA_INTR_MSCI(0);
        if (isr0 & 0xF0)
                result |= SCA_INTR_MSCI(1);

        if (!(result & SCA_INTR_DMAC_TX(0)))
                if (sca_in(DSR_TX(0), card) & DSR_EOM)
                        result |= SCA_INTR_DMAC_TX(0);
        if (!(result & SCA_INTR_DMAC_TX(1)))
                if (sca_in(DSR_TX(1), card) & DSR_EOM)
                        result |= SCA_INTR_DMAC_TX(1);

        return result;
}

static inline port_t *dev_to_port(struct net_device *dev)
{
        return dev_to_hdlc(dev)->priv;
}

static inline u16 next_desc(port_t *port, u16 desc, int transmit)
{
        return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
                             : port_to_card(port)->rx_ring_buffers);
}

static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
        u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
        u16 tx_buffs = port_to_card(port)->tx_ring_buffers;

        desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
        return log_node(port) * (rx_buffs + tx_buffs) +
               transmit * rx_buffs + desc;
}
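
/* Worked example of the memory map above (ring sizes are illustrative
 * only; the real values come from the card driver): with
 * rx_ring_buffers = tx_ring_buffers = 16, channel #0 owns absolute
 * descriptors 0-15 (RX) and 16-31 (TX), channel #1 owns 32-47 (RX) and
 * 48-63 (TX).  For channel #1 (log_node = 1), desc_abs_number(port, 2, 1)
 * yields 1 * (16 + 16) + 1 * 16 + 2 = 50.
 */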

static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
        /* Descriptor offset always fits in 16 bits */
        return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}

static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
                                             int transmit)
{
#ifdef PAGE0_ALWAYS_MAPPED
        return (pkt_desc __iomem *)(win0base(port_to_card(port))
                                    + desc_offset(port, desc, transmit));
#else
        return (pkt_desc __iomem *)(winbase(port_to_card(port))
                                    + desc_offset(port, desc, transmit));
#endif
}

static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
        return port_to_card(port)->buff_offset +
               desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}

static inline void sca_set_carrier(port_t *port)
{
        if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier on\n",
                       port_to_dev(port)->name);
#endif
                netif_carrier_on(port_to_dev(port));
        } else {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier off\n",
                       port_to_dev(port)->name);
#endif
                netif_carrier_off(port_to_dev(port));
        }
}

static void sca_init_port(port_t *port)
{
        card_t *card = port_to_card(port);
        int transmit, i;

        port->rxin = 0;
        port->txin = 0;
        port->txlast = 0;

#ifndef PAGE0_ALWAYS_MAPPED
        openwin(card, 0);
#endif

        for (transmit = 0; transmit < 2; transmit++) {
                u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
                u16 buffs = transmit ? card->tx_ring_buffers
                            : card->rx_ring_buffers;

                for (i = 0; i < buffs; i++) {
                        pkt_desc __iomem *desc = desc_address(port, i, transmit);
                        u16 chain_off = desc_offset(port, i + 1, transmit);
                        u32 buff_off = buffer_offset(port, i, transmit);

                        writew(chain_off, &desc->cp);
                        writel(buff_off, &desc->bp);
                        writew(0, &desc->len);
                        writeb(0, &desc->stat);
                }

                /* DMA disable - to halt state */
                sca_out(0, transmit ? DSR_TX(phy_node(port)) :
                        DSR_RX(phy_node(port)), card);
                /* software ABORT - to initial state */
                sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
                        DCR_RX(phy_node(port)), card);

                /* current desc addr */
                sca_out(0, dmac + CPB, card); /* pointer base */
                sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
                if (!transmit)
                        sca_outw(desc_offset(port, buffs - 1, transmit),
                                 dmac + EDAL, card);
                else
                        sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
                                 card);

                /* clear frame end interrupt counter */
                sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
                        DCR_RX(phy_node(port)), card);

                if (!transmit) { /* Receive */
                        /* set buffer length */
                        sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
                        /* Chain mode, Multi-frame */
                        sca_out(0x14, DMR_RX(phy_node(port)), card);
                        sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
                                card);
                        /* DMA enable */
                        sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
                } else { /* Transmit */
                        /* Chain mode, Multi-frame */
                        sca_out(0x14, DMR_TX(phy_node(port)), card);
                        /* enable underflow interrupts */
                        sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
                }
        }
        sca_set_carrier(port);
}
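
/* Note on the ring setup above: desc_abs_number() reduces the index
 * modulo the ring size, so the chain pointer written for the last
 * descriptor wraps back to the first one and each ring is circular.
 * The RX ring starts with EDA pointing at its last descriptor, handing
 * the DMAC the whole ring to fill; the TX ring starts with EDA == CDA,
 * so the transmitter stays idle until sca_xmit() advances EDA.
 */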

#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
        u16 msci = get_msci(port);
        card_t *card = port_to_card(port);
        u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

        /* Reset MSCI TX underrun and CDCD status bit */
        sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

        if (stat & ST1_UDRN) {
                /* TX Underrun error detected */
                port_to_dev(port)->stats.tx_errors++;
                port_to_dev(port)->stats.tx_fifo_errors++;
        }

        if (stat & ST1_CDCD)
                sca_set_carrier(port);
}
#endif

static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
                          u16 rxin)
{
        struct net_device *dev = port_to_dev(port);
        struct sk_buff *skb;
        u16 len;
        u32 buff;
        u32 maxlen;
        u8 page;

        len = readw(&desc->len);
        skb = dev_alloc_skb(len);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        buff = buffer_offset(port, rxin, 0);
        page = buff / winsize(card);
        buff = buff % winsize(card);
        maxlen = winsize(card) - buff;

        openwin(card, page);

        if (len > maxlen) {
                memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
                openwin(card, page + 1);
                memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
        } else {
                memcpy_fromio(skb->data, winbase(card) + buff, len);
        }

#ifndef PAGE0_ALWAYS_MAPPED
        openwin(card, 0); /* select pkt_desc table page back */
#endif
        skb_put(skb, len);
#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        skb->protocol = hdlc_type_trans(skb, dev);
        netif_rx(skb);
}

/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
        struct net_device *dev = port_to_dev(port);
        u16 dmac = get_dmac_rx(port);
        card_t *card = port_to_card(port);
        u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_RX(phy_node(port)), card);

        if (stat & DSR_BOF)
                /* Dropped one or more frames */
                dev->stats.rx_over_errors++;

        while (1) {
                u32 desc_off = desc_offset(port, port->rxin, 0);
                pkt_desc __iomem *desc;
                u32 cda = sca_inw(dmac + CDAL, card);

                if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc)))
                        break; /* No frame received */

                desc = desc_address(port, port->rxin, 0);
                stat = readb(&desc->stat);
                if (!(stat & ST_RX_EOM))
                        port->rxpart = 1; /* partial frame received */
                else if ((stat & ST_ERROR_MASK) || port->rxpart) {
                        dev->stats.rx_errors++;
                        if (stat & ST_RX_OVERRUN)
                                dev->stats.rx_fifo_errors++;
                        else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
                                          ST_RX_RESBIT)) || port->rxpart)
                                dev->stats.rx_frame_errors++;
                        else if (stat & ST_RX_CRC)
                                dev->stats.rx_crc_errors++;
                        if (stat & ST_RX_EOM)
                                port->rxpart = 0; /* received last fragment */
                } else {
                        sca_rx(card, port, desc, port->rxin);
                }

                /* Set new error descriptor address */
                sca_outw(desc_off, dmac + EDAL, card);
                port->rxin = next_desc(port, port->rxin, 0);
        }

        /* make sure RX DMA is enabled */
        sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
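
/* Note on the loop above: the DMAC advances CDA (the current descriptor
 * address) as it completes buffers.  While CDA points outside the rxin
 * descriptor, that descriptor has been completed and can be processed;
 * writing its offset back to EDA then returns it to the hardware, so
 * software consumes descriptors one step behind the DMAC.
 */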

/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
        struct net_device *dev = port_to_dev(port);
        u16 dmac = get_dmac_tx(port);
        card_t *card = port_to_card(port);
        u8 stat;

        spin_lock(&port->lock);

        stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_TX(phy_node(port)), card);

        while (1) {
                pkt_desc __iomem *desc;

                u32 desc_off = desc_offset(port, port->txlast, 1);
                u32 cda = sca_inw(dmac + CDAL, card);

                if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc)))
                        break; /* Transmitter is/will_be sending this frame */

                desc = desc_address(port, port->txlast, 1);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += readw(&desc->len);
                writeb(0, &desc->stat); /* Free descriptor */
                port->txlast = next_desc(port, port->txlast, 1);
        }

        netif_wake_queue(dev);
        spin_unlock(&port->lock);
}

static irqreturn_t sca_intr(int irq, void *dev_id)
{
        card_t *card = dev_id;
        int i;
        u8 stat;
        int handled = 0;
        u8 page = sca_get_page(card);

        while ((stat = sca_intr_status(card)) != 0) {
                handled = 1;
                for (i = 0; i < 2; i++) {
                        port_t *port = get_port(card, i);

                        if (port) {
                                if (stat & SCA_INTR_MSCI(i))
                                        sca_msci_intr(port);

                                if (stat & SCA_INTR_DMAC_RX(i))
                                        sca_rx_intr(port);

                                if (stat & SCA_INTR_DMAC_TX(i))
                                        sca_tx_intr(port);
                        }
                }
        }

        openwin(card, page); /* Restore original page */
        return IRQ_RETVAL(handled);
}

static void sca_set_port(port_t *port)
{
        card_t *card = port_to_card(port);
        u16 msci = get_msci(port);
        u8 md2 = sca_in(msci + MD2, card);
        unsigned int tmc, br = 10, brv = 1024;

        if (port->settings.clock_rate > 0) {
                /* Try lower br for better accuracy */
                do {
                        br--;
                        brv >>= 1; /* brv = 2^9 = 512 max in specs */

                        /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
                        tmc = CLOCK_BASE / brv / port->settings.clock_rate;
                } while (br > 1 && tmc <= 128);

                if (tmc < 1) {
                        tmc = 1;
                        br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
                        brv = 1;
                } else if (tmc > 255) {
                        tmc = 256; /* tmc=0 means 256 - low baud rates */
                }

                port->settings.clock_rate = CLOCK_BASE / brv / tmc;
        } else {
                br = 9; /* Minimum clock rate */
                tmc = 256; /* 8bit = 0 */
                port->settings.clock_rate = CLOCK_BASE / (256 * 512);
        }

        port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
        port->txs = (port->txs & ~CLK_BRG_MASK) | br;
        port->tmc = tmc;

        /* baud divisor - time constant */
        sca_out(port->tmc, msci + TMC, card);

        /* Set BRG bits */
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;
        else
                md2 &= ~MD2_LOOPBACK;

        sca_out(md2, msci + MD2, card);
}
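
/* Worked example for the BRG search above (CLOCK_BASE is supplied by the
 * card driver; 9.8304 MHz is assumed here purely for illustration): a
 * requested clock_rate of 64000 bps leaves the loop at br = 1, brv = 2
 * with tmc = 9830400 / 2 / 64000 = 76, and the achieved rate written
 * back into port->settings is 9830400 / 2 / 76 = 64673 bps.
 */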

static void sca_open(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);
        u16 msci = get_msci(port);
        u8 md0, md2;

        switch (port->encoding) {
        case ENCODING_NRZ:
                md2 = MD2_NRZ;
                break;
        case ENCODING_NRZI:
                md2 = MD2_NRZI;
                break;
        case ENCODING_FM_MARK:
                md2 = MD2_FM_MARK;
                break;
        case ENCODING_FM_SPACE:
                md2 = MD2_FM_SPACE;
                break;
        default:
                md2 = MD2_MANCHESTER;
        }

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;

        switch (port->parity) {
        case PARITY_CRC16_PR0:
                md0 = MD0_HDLC | MD0_CRC_16_0;
                break;
        case PARITY_CRC16_PR1:
                md0 = MD0_HDLC | MD0_CRC_16;
                break;
        case PARITY_CRC16_PR0_CCITT:
                md0 = MD0_HDLC | MD0_CRC_ITU_0;
                break;
        case PARITY_CRC16_PR1_CCITT:
                md0 = MD0_HDLC | MD0_CRC_ITU;
                break;
        default:
                md0 = MD0_HDLC | MD0_CRC_NONE;
        }

        sca_out(CMD_RESET, msci + CMD, card);
        sca_out(md0, msci + MD0, card);
        sca_out(0x00, msci + MD1, card); /* no address field check */
        sca_out(md2, msci + MD2, card);
        sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
        sca_out(CTL_IDLE, msci + CTL, card);

        /* Allow at least 8 bytes before requesting RX DMA operation */
        /* TX with higher priority and possibly with shorter transfers */
        sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition */
        sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition */
        sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactivation condition */

        /* We're using the following interrupts:
         * - TXINT (DMAC completed all transmissions, underrun or DCD change)
         * - all DMA interrupts
         */
        sca_set_carrier(port);

        /* MSCI TX INT and RX INT A IRQ enable */
        sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
        sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
        sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
                IER0, card); /* TXINT and RXINT */
        /* enable DMA IRQ */
        sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
                IER1, card);

        sca_out(port->tmc, msci + TMC, card); /* Restore registers */
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);
        sca_out(CMD_TX_ENABLE, msci + CMD, card);
        sca_out(CMD_RX_ENABLE, msci + CMD, card);

        netif_start_queue(dev);
}

static void sca_close(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);

        /* reset channel */
        sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
        /* disable MSCI interrupts */
        sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
                IER0, card);
        /* disable DMA interrupts */
        sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
                IER1, card);

        netif_stop_queue(dev);
}
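
/* Note on the IER masks in sca_open()/sca_close() above: channel 0's
 * interrupt enable bits live in the low nibble of IER0/IER1 and channel
 * 1's in the high nibble, so opening a port ORs its channel's bits in
 * and closing it masks them out without disturbing the other channel.
 */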

static int sca_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI &&
            encoding != ENCODING_FM_MARK &&
            encoding != ENCODING_FM_SPACE &&
            encoding != ENCODING_MANCHESTER)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC16_PR0 &&
            parity != PARITY_CRC16_PR1 &&
            parity != PARITY_CRC16_PR0_CCITT &&
            parity != PARITY_CRC16_PR1_CCITT)
                return -EINVAL;

        dev_to_port(dev)->encoding = encoding;
        dev_to_port(dev)->parity = parity;
        return 0;
}

#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);
        u16 cnt;
#ifndef PAGE0_ALWAYS_MAPPED
        u8 page = sca_get_page(card);

        openwin(card, 0);
#endif

        printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
               sca_inw(get_dmac_rx(port) + CDAL, card),
               sca_inw(get_dmac_rx(port) + EDAL, card),
               sca_in(DSR_RX(phy_node(port)), card), port->rxin,
               sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
        for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
                pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
        pr_cont("\n");

        printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
               "last=%u %sactive",
               sca_inw(get_dmac_tx(port) + CDAL, card),
               sca_inw(get_dmac_tx(port) + EDAL, card),
               sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
               sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");

        for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
                pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
        pr_cont("\n");

        printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
               " FST: %02x CST: %02x %02x\n",
               sca_in(get_msci(port) + MD0, card),
               sca_in(get_msci(port) + MD1, card),
               sca_in(get_msci(port) + MD2, card),
               sca_in(get_msci(port) + ST0, card),
               sca_in(get_msci(port) + ST1, card),
               sca_in(get_msci(port) + ST2, card),
               sca_in(get_msci(port) + ST3, card),
               sca_in(get_msci(port) + FST, card),
               sca_in(get_msci(port) + CST0, card),
               sca_in(get_msci(port) + CST1, card));

        printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
               sca_in(ISR1, card), sca_in(ISR2, card));

#ifndef PAGE0_ALWAYS_MAPPED
        openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */

static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);
        pkt_desc __iomem *desc;
        u32 buff, len;
        u8 page;
        u32 maxlen;

        spin_lock_irq(&port->lock);

        desc = desc_address(port, port->txin + 1, 1);
        BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */

#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif

        desc = desc_address(port, port->txin, 1);
        buff = buffer_offset(port, port->txin, 1);
        len = skb->len;
        page = buff / winsize(card);
        buff = buff % winsize(card);
        maxlen = winsize(card) - buff;

        openwin(card, page);
        if (len > maxlen) {
                memcpy_toio(winbase(card) + buff, skb->data, maxlen);
                openwin(card, page + 1);
                memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
        } else {
                memcpy_toio(winbase(card) + buff, skb->data, len);
        }

#ifndef PAGE0_ALWAYS_MAPPED
        openwin(card, 0); /* select pkt_desc table page back */
#endif
        writew(len, &desc->len);
        writeb(ST_TX_EOM, &desc->stat);

        port->txin = next_desc(port, port->txin, 1);
        sca_outw(desc_offset(port, port->txin, 1),
                 get_dmac_tx(port) + EDAL, card);

        sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */

        desc = desc_address(port, port->txin + 1, 1);
        if (readb(&desc->stat)) /* allow 1 packet gap */
                netif_stop_queue(dev);

        spin_unlock_irq(&port->lock);

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
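
/* Note on the queue handling above: a TX descriptor with a non-zero
 * status byte is still owned by the transmitter (sca_tx_intr() zeroes
 * it once the frame has gone out), so sca_xmit() stops the queue as
 * soon as the descriptor after the next one is still busy, always
 * leaving a one-descriptor gap between software and the hardware.
 */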

#ifdef NEED_DETECT_RAM
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
        /* Round RAM size to 32 bits, fill from end to start */
        u32 i = ramsize &= ~3;
        u32 size = winsize(card);

        openwin(card, (i - 4) / size); /* select last window */

        do {
                i -= 4;
                if ((i + 4) % size == 0)
                        openwin(card, i / size);
                writel(i ^ 0x12345678, rambase + i % size);
        } while (i > 0);

        for (i = 0; i < ramsize; i += 4) {
                if (i % size == 0)
                        openwin(card, i / size);

                if (readl(rambase + i % size) != (i ^ 0x12345678))
                        break;
        }

        return i;
}
#endif /* NEED_DETECT_RAM */

static void sca_init(card_t *card, int wait_states)
{
        sca_out(wait_states, WCRL, card); /* Wait Control */
        sca_out(wait_states, WCRM, card);
        sca_out(wait_states, WCRH, card);

        sca_out(0, DMER, card); /* DMA Master disable */
        sca_out(0x03, PCR, card); /* DMA priority */
        sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
        sca_out(0, DSR_TX(0), card);
        sca_out(0, DSR_RX(1), card);
        sca_out(0, DSR_TX(1), card);
        sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}
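
/* Note on the bring-up order in sca_init() above: the DMA master enable
 * bit (DMER_DME) is deliberately cleared first and set last, so that all
 * four per-channel DMA status registers are forced into the halted state
 * before any DMA transfer can start.  sca_detect_ram() similarly fills
 * every 32-bit word, top-down, with its offset XORed with 0x12345678 and
 * verifies bottom-up; the first mismatch marks the end of usable RAM.
 */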