1 /* Agere Systems Inc. 2 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 3 * 4 * Copyright © 2005 Agere Systems Inc. 5 * All rights reserved. 6 * http://www.agere.com 7 * 8 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 9 * 10 *------------------------------------------------------------------------------ 11 * 12 * SOFTWARE LICENSE 13 * 14 * This software is provided subject to the following terms and conditions, 15 * which you should read carefully before using the software. Using this 16 * software indicates your acceptance of these terms and conditions. If you do 17 * not agree with these terms and conditions, do not use the software. 18 * 19 * Copyright © 2005 Agere Systems Inc. 20 * All rights reserved. 21 * 22 * Redistribution and use in source or binary forms, with or without 23 * modifications, are permitted provided that the following conditions are met: 24 * 25 * . Redistributions of source code must retain the above copyright notice, this 26 * list of conditions and the following Disclaimer as comments in the code as 27 * well as in the documentation and/or other materials provided with the 28 * distribution. 29 * 30 * . Redistributions in binary form must reproduce the above copyright notice, 31 * this list of conditions and the following Disclaimer in the documentation 32 * and/or other materials provided with the distribution. 33 * 34 * . Neither the name of Agere Systems Inc. nor the names of the contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 37 * 38 * Disclaimer 39 * 40 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 41 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 42 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 43 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 44 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY 45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 48 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 51 * DAMAGE. 52 */ 53 54 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 55 56 #include <linux/pci.h> 57 #include <linux/module.h> 58 #include <linux/types.h> 59 #include <linux/kernel.h> 60 61 #include <linux/sched.h> 62 #include <linux/ptrace.h> 63 #include <linux/slab.h> 64 #include <linux/ctype.h> 65 #include <linux/string.h> 66 #include <linux/timer.h> 67 #include <linux/interrupt.h> 68 #include <linux/in.h> 69 #include <linux/delay.h> 70 #include <linux/bitops.h> 71 #include <linux/io.h> 72 73 #include <linux/netdevice.h> 74 #include <linux/etherdevice.h> 75 #include <linux/skbuff.h> 76 #include <linux/if_arp.h> 77 #include <linux/ioport.h> 78 #include <linux/crc32.h> 79 #include <linux/random.h> 80 #include <linux/phy.h> 81 82 #include "et131x.h" 83 84 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); 85 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); 86 MODULE_LICENSE("Dual BSD/GPL"); 87 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems"); 88 89 /* EEPROM defines */ 90 #define MAX_NUM_REGISTER_POLLS 1000 91 #define MAX_NUM_WRITE_RETRIES 2 92 93 /* MAC defines */ 94 #define COUNTER_WRAP_16_BIT 0x10000 95 #define COUNTER_WRAP_12_BIT 0x1000 96 97 /* PCI defines */ 98 #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ 99 #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ 100 101 /* ISR defines */ 102 /* For interrupts, normal running is: 103 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, 
104 * watchdog_interrupt & txdma_xfer_done 105 * 106 * In both cases, when flow control is enabled for either Tx or bi-direction, 107 * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the 108 * buffer rings are running low. 109 */ 110 #define INT_MASK_DISABLE 0xffffffff 111 112 /* NOTE: Masking out MAC_STAT Interrupt for now... 113 * #define INT_MASK_ENABLE 0xfff6bf17 114 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 115 */ 116 #define INT_MASK_ENABLE 0xfffebf17 117 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 118 119 /* General defines */ 120 /* Packet and header sizes */ 121 #define NIC_MIN_PACKET_SIZE 60 122 123 /* Multicast list size */ 124 #define NIC_MAX_MCAST_LIST 128 125 126 /* Supported Filters */ 127 #define ET131X_PACKET_TYPE_DIRECTED 0x0001 128 #define ET131X_PACKET_TYPE_MULTICAST 0x0002 129 #define ET131X_PACKET_TYPE_BROADCAST 0x0004 130 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 131 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 132 133 /* Tx Timeout */ 134 #define ET131X_TX_TIMEOUT (1 * HZ) 135 #define NIC_SEND_HANG_THRESHOLD 0 136 137 /* MP_ADAPTER flags */ 138 #define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 139 140 /* MP_SHARED flags */ 141 #define FMP_ADAPTER_LOWER_POWER 0x00200000 142 143 #define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 144 #define FMP_ADAPTER_HARDWARE_ERROR 0x04000000 145 146 #define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 147 148 /* Some offsets in PCI config space that are actually used. 
 */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

/* Default tuning parameters (see "registry" comment further down) */
#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define MAX_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;		/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];	/* per-buffer kernel virtual addresses */
	u32		 bus_high[MAX_DESC_PER_RING_RX];	/* per-buffer DMA address, high 32 bits */
	u32		 bus_low[MAX_DESC_PER_RING_RX];	/* per-buffer DMA address, low 32 bits */
	void		*ring_virtaddr;		/* the descriptor ring itself */
	dma_addr_t	 ring_physaddr;		/* DMA address of the ring */
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];	/* one allocation per FBR_CHUNKS buffers */
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32		 local_full;		/* local copy of the ring's full offset */
	u32		 num_entries;		/* entries actually in use */
	dma_addr_t	 buffsize;		/* size of each buffer in this ring */
};

/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];	/* the two free buffer rings */
	void *ps_ring_virtaddr;			/* packet status ring */
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;			/* local copy of PSR full offset */
	u32 psr_entries;

	struct rx_status_block *rx_status_block;	/* write-back status block */
	dma_addr_t rx_status_bus;

	struct list_head recv_list;		/* list of available RFDs */
	u32 num_ready_recv;			/* count of RFDs on recv_list */

	u32 num_rfd;				/* total RFDs allocated */

	bool unfinished_receives;		/* more packets pending than budget allowed */
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append
VLAN tag 326 * 12: IP checksum assist 327 * 13: TCP checksum assist 328 * 14: UDP checksum assist 329 */ 330 #define TXDESC_FLAG_LASTPKT 0x0001 331 #define TXDESC_FLAG_FIRSTPKT 0x0002 332 #define TXDESC_FLAG_INTPROC 0x0004 333 334 /* struct tx_desc represents each descriptor on the ring */ 335 struct tx_desc { 336 u32 addr_hi; 337 u32 addr_lo; 338 u32 len_vlan; /* control words how to xmit the */ 339 u32 flags; /* data (detailed above) */ 340 }; 341 342 /* The status of the Tx DMA engine it sits in free memory, and is pointed to 343 * by 0x101c / 0x1020. This is a DMA10 type 344 */ 345 346 /* TCB (Transmit Control Block: Host Side) */ 347 struct tcb { 348 struct tcb *next; /* Next entry in ring */ 349 u32 count; /* Used to spot stuck/lost packets */ 350 u32 stale; /* Used to spot stuck/lost packets */ 351 struct sk_buff *skb; /* Network skb we are tied to */ 352 u32 index; /* Ring indexes */ 353 u32 index_start; 354 }; 355 356 /* Structure representing our local reference(s) to the ring */ 357 struct tx_ring { 358 /* TCB (Transmit Control Block) memory and lists */ 359 struct tcb *tcb_ring; 360 361 /* List of TCBs that are ready to be used */ 362 struct tcb *tcb_qhead; 363 struct tcb *tcb_qtail; 364 365 /* list of TCBs that are currently being sent. */ 366 struct tcb *send_head; 367 struct tcb *send_tail; 368 int used; 369 370 /* The actual descriptor ring */ 371 struct tx_desc *tx_desc_ring; 372 dma_addr_t tx_desc_ring_pa; 373 374 /* send_idx indicates where we last wrote to in the descriptor ring. 
*/ 375 u32 send_idx; 376 377 /* The location of the write-back status block */ 378 u32 *tx_status; 379 dma_addr_t tx_status_pa; 380 381 /* Packets since the last IRQ: used for interrupt coalescing */ 382 int since_irq; 383 }; 384 385 /* Do not change these values: if changed, then change also in respective 386 * TXdma and Rxdma engines 387 */ 388 #define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ 389 #define NUM_TCB 64 390 391 /* These values are all superseded by registry entries to facilitate tuning. 392 * Once the desired performance has been achieved, the optimal registry values 393 * should be re-populated to these #defines: 394 */ 395 #define TX_ERROR_PERIOD 1000 396 397 #define LO_MARK_PERCENT_FOR_PSR 15 398 #define LO_MARK_PERCENT_FOR_RX 15 399 400 /* RFD (Receive Frame Descriptor) */ 401 struct rfd { 402 struct list_head list_node; 403 struct sk_buff *skb; 404 u32 len; /* total size of receive frame */ 405 u16 bufferindex; 406 u8 ringindex; 407 }; 408 409 /* Flow Control */ 410 #define FLOW_BOTH 0 411 #define FLOW_TXONLY 1 412 #define FLOW_RXONLY 2 413 #define FLOW_NONE 3 414 415 /* Struct to define some device statistics */ 416 struct ce_stats { 417 u32 multicast_pkts_rcvd; 418 u32 rcvd_pkts_dropped; 419 420 u32 tx_underflows; 421 u32 tx_collisions; 422 u32 tx_excessive_collisions; 423 u32 tx_first_collisions; 424 u32 tx_late_collisions; 425 u32 tx_max_pkt_errs; 426 u32 tx_deferred; 427 428 u32 rx_overflows; 429 u32 rx_length_errs; 430 u32 rx_align_errs; 431 u32 rx_crc_errs; 432 u32 rx_code_violations; 433 u32 rx_other_errs; 434 435 u32 interrupt_status; 436 }; 437 438 /* The private adapter structure */ 439 struct et131x_adapter { 440 struct net_device *netdev; 441 struct pci_dev *pdev; 442 struct mii_bus *mii_bus; 443 struct napi_struct napi; 444 445 /* Flags that indicate current state of the adapter */ 446 u32 flags; 447 448 /* local link state, to determine if a state change has occurred */ 449 int link; 450 451 /* Configuration */ 
452 u8 rom_addr[ETH_ALEN]; 453 u8 addr[ETH_ALEN]; 454 bool has_eeprom; 455 u8 eeprom_data[2]; 456 457 spinlock_t tcb_send_qlock; /* protects the tx_ring send tcb list */ 458 spinlock_t tcb_ready_qlock; /* protects the tx_ring ready tcb list */ 459 spinlock_t rcv_lock; /* protects the rx_ring receive list */ 460 461 /* Packet Filter and look ahead size */ 462 u32 packet_filter; 463 464 /* multicast list */ 465 u32 multicast_addr_count; 466 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; 467 468 /* Pointer to the device's PCI register space */ 469 struct address_map __iomem *regs; 470 471 /* Registry parameters */ 472 u8 wanted_flow; /* Flow we want for 802.3x flow control */ 473 u32 registry_jumbo_packet; /* Max supported ethernet packet size */ 474 475 /* Derived from the registry: */ 476 u8 flow; /* flow control validated by the far-end */ 477 478 /* Minimize init-time */ 479 struct timer_list error_timer; 480 481 /* variable putting the phy into coma mode when boot up with no cable 482 * plugged in after 5 seconds 483 */ 484 u8 boot_coma; 485 486 /* Tx Memory Variables */ 487 struct tx_ring tx_ring; 488 489 /* Rx Memory Variables */ 490 struct rx_ring rx_ring; 491 492 struct ce_stats stats; 493 }; 494 495 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) 496 { 497 u32 reg; 498 int i; 499 500 /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and 501 * bits 7,1:0 both equal to 1, at least once after reset. 
502 * Subsequent operations need only to check that bits 1:0 are equal 503 * to 1 prior to starting a single byte read/write 504 */ 505 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { 506 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) 507 return -EIO; 508 509 /* I2C idle and Phy Queue Avail both true */ 510 if ((reg & 0x3000) == 0x3000) { 511 if (status) 512 *status = reg; 513 return reg & 0xFF; 514 } 515 } 516 return -ETIMEDOUT; 517 } 518 519 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) 520 { 521 struct pci_dev *pdev = adapter->pdev; 522 int index = 0; 523 int retries; 524 int err = 0; 525 int writeok = 0; 526 u32 status; 527 u32 val = 0; 528 529 /* For an EEPROM, an I2C single byte write is defined as a START 530 * condition followed by the device address, EEPROM address, one byte 531 * of data and a STOP condition. The STOP condition will trigger the 532 * EEPROM's internally timed write cycle to the nonvolatile memory. 533 * All inputs are disabled during this write cycle and the EEPROM will 534 * not respond to any access until the internal write is complete. 535 */ 536 err = eeprom_wait_ready(pdev, NULL); 537 if (err < 0) 538 return err; 539 540 /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, 541 * and bits 1:0 both =0. Bit 5 should be set according to the 542 * type of EEPROM being accessed (1=two byte addressing, 0=one 543 * byte addressing). 544 */ 545 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 546 LBCIF_CONTROL_LBCIF_ENABLE | 547 LBCIF_CONTROL_I2C_WRITE)) 548 return -EIO; 549 550 /* Prepare EEPROM address for Step 3 */ 551 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { 552 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 553 break; 554 /* Write the data to the LBCIF Data Register (the I2C write 555 * will begin). 556 */ 557 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) 558 break; 559 /* Monitor bit 1:0 of the LBCIF Status Register. 
When bits 560 * 1:0 are both equal to 1, the I2C write has completed and the 561 * internal write cycle of the EEPROM is about to start. 562 * (bits 1:0 = 01 is a legal state while waiting from both 563 * equal to 1, but bits 1:0 = 10 is invalid and implies that 564 * something is broken). 565 */ 566 err = eeprom_wait_ready(pdev, &status); 567 if (err < 0) 568 return 0; 569 570 /* Check bit 3 of the LBCIF Status Register. If equal to 1, 571 * an error has occurred.Don't break here if we are revision 572 * 1, this is so we do a blind write for load bug. 573 */ 574 if ((status & LBCIF_STATUS_GENERAL_ERROR) && 575 adapter->pdev->revision == 0) 576 break; 577 578 /* Check bit 2 of the LBCIF Status Register. If equal to 1 an 579 * ACK error has occurred on the address phase of the write. 580 * This could be due to an actual hardware failure or the 581 * EEPROM may still be in its internal write cycle from a 582 * previous write. This write operation was ignored and must be 583 *repeated later. 584 */ 585 if (status & LBCIF_STATUS_ACK_ERROR) { 586 /* This could be due to an actual hardware failure 587 * or the EEPROM may still be in its internal write 588 * cycle from a previous write. This write operation 589 * was ignored and must be repeated later. 590 */ 591 udelay(10); 592 continue; 593 } 594 595 writeok = 1; 596 break; 597 } 598 599 udelay(10); 600 601 while (1) { 602 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 603 LBCIF_CONTROL_LBCIF_ENABLE)) 604 writeok = 0; 605 606 /* Do read until internal ACK_ERROR goes away meaning write 607 * completed 608 */ 609 do { 610 pci_write_config_dword(pdev, 611 LBCIF_ADDRESS_REGISTER, 612 addr); 613 do { 614 pci_read_config_dword(pdev, 615 LBCIF_DATA_REGISTER, 616 &val); 617 } while ((val & 0x00010000) == 0); 618 } while (val & 0x00040000); 619 620 if ((val & 0xFF00) != 0xC000 || index == 10000) 621 break; 622 index++; 623 } 624 return writeok ? 
0 : -EIO; 625 } 626 627 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) 628 { 629 struct pci_dev *pdev = adapter->pdev; 630 int err; 631 u32 status; 632 633 /* A single byte read is similar to the single byte write, with the 634 * exception of the data flow: 635 */ 636 err = eeprom_wait_ready(pdev, NULL); 637 if (err < 0) 638 return err; 639 /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, 640 * and bits 1:0 both =0. Bit 5 should be set according to the type 641 * of EEPROM being accessed (1=two byte addressing, 0=one byte 642 * addressing). 643 */ 644 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 645 LBCIF_CONTROL_LBCIF_ENABLE)) 646 return -EIO; 647 /* Write the address to the LBCIF Address Register (I2C read will 648 * begin). 649 */ 650 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 651 return -EIO; 652 /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read 653 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure 654 * has occurred). 655 */ 656 err = eeprom_wait_ready(pdev, &status); 657 if (err < 0) 658 return err; 659 /* Regardless of error status, read data byte from LBCIF Data 660 * Register. 661 */ 662 *pdata = err; 663 664 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; 665 } 666 667 static int et131x_init_eeprom(struct et131x_adapter *adapter) 668 { 669 struct pci_dev *pdev = adapter->pdev; 670 u8 eestatus; 671 672 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); 673 674 /* THIS IS A WORKAROUND: 675 * I need to call this function twice to get my card in a 676 * LG M1 Express Dual running. I tried also a msleep before this 677 * function, because I thought there could be some time conditions 678 * but it didn't work. Call the whole function twice also work. 
679 */ 680 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 681 dev_err(&pdev->dev, 682 "Could not read PCI config space for EEPROM Status\n"); 683 return -EIO; 684 } 685 686 /* Determine if the error(s) we care about are present. If they are 687 * present we need to fail. 688 */ 689 if (eestatus & 0x4C) { 690 int write_failed = 0; 691 692 if (pdev->revision == 0x01) { 693 int i; 694 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; 695 696 /* Re-write the first 4 bytes if we have an eeprom 697 * present and the revision id is 1, this fixes the 698 * corruption seen with 1310 B Silicon 699 */ 700 for (i = 0; i < 3; i++) 701 if (eeprom_write(adapter, i, eedata[i]) < 0) 702 write_failed = 1; 703 } 704 if (pdev->revision != 0x01 || write_failed) { 705 dev_err(&pdev->dev, 706 "Fatal EEPROM Status Error - 0x%04x\n", 707 eestatus); 708 709 /* This error could mean that there was an error 710 * reading the eeprom or that the eeprom doesn't exist. 711 * We will treat each case the same and not try to 712 * gather additional information that normally would 713 * come from the eeprom, like MAC Address 714 */ 715 adapter->has_eeprom = 0; 716 return -EIO; 717 } 718 } 719 adapter->has_eeprom = 1; 720 721 /* Read the EEPROM for information regarding LED behavior. Refer to 722 * et131x_xcvr_init() for its use. 
723 */ 724 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); 725 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); 726 727 if (adapter->eeprom_data[0] != 0xcd) 728 /* Disable all optional features */ 729 adapter->eeprom_data[1] = 0x00; 730 731 return 0; 732 } 733 734 static void et131x_rx_dma_enable(struct et131x_adapter *adapter) 735 { 736 /* Setup the receive dma configuration register for normal operation */ 737 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; 738 struct rx_ring *rx_ring = &adapter->rx_ring; 739 740 if (rx_ring->fbr[1]->buffsize == 4096) 741 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; 742 else if (rx_ring->fbr[1]->buffsize == 8192) 743 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; 744 else if (rx_ring->fbr[1]->buffsize == 16384) 745 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; 746 747 csr |= ET_RXDMA_CSR_FBR0_ENABLE; 748 if (rx_ring->fbr[0]->buffsize == 256) 749 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; 750 else if (rx_ring->fbr[0]->buffsize == 512) 751 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; 752 else if (rx_ring->fbr[0]->buffsize == 1024) 753 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; 754 writel(csr, &adapter->regs->rxdma.csr); 755 756 csr = readl(&adapter->regs->rxdma.csr); 757 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 758 udelay(5); 759 csr = readl(&adapter->regs->rxdma.csr); 760 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 761 dev_err(&adapter->pdev->dev, 762 "RX Dma failed to exit halt state. CSR 0x%08x\n", 763 csr); 764 } 765 } 766 } 767 768 static void et131x_rx_dma_disable(struct et131x_adapter *adapter) 769 { 770 u32 csr; 771 /* Setup the receive dma configuration register */ 772 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE, 773 &adapter->regs->rxdma.csr); 774 csr = readl(&adapter->regs->rxdma.csr); 775 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) { 776 udelay(5); 777 csr = readl(&adapter->regs->rxdma.csr); 778 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) 779 dev_err(&adapter->pdev->dev, 780 "RX Dma failed to enter halt state. 
CSR 0x%08x\n", 781 csr); 782 } 783 } 784 785 static void et131x_tx_dma_enable(struct et131x_adapter *adapter) 786 { 787 /* Setup the transmit dma configuration register for normal 788 * operation 789 */ 790 writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), 791 &adapter->regs->txdma.csr); 792 } 793 794 static inline void add_10bit(u32 *v, int n) 795 { 796 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); 797 } 798 799 static inline void add_12bit(u32 *v, int n) 800 { 801 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); 802 } 803 804 static void et1310_config_mac_regs1(struct et131x_adapter *adapter) 805 { 806 struct mac_regs __iomem *macregs = &adapter->regs->mac; 807 u32 station1; 808 u32 station2; 809 u32 ipg; 810 811 /* First we need to reset everything. Write to MAC configuration 812 * register 1 to perform reset. 813 */ 814 writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | 815 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 816 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC, 817 ¯egs->cfg1); 818 819 /* Next lets configure the MAC Inter-packet gap register */ 820 ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ 821 ipg |= 0x50 << 8; /* ifg enforce 0x50 */ 822 writel(ipg, ¯egs->ipg); 823 824 /* Next lets configure the MAC Half Duplex register */ 825 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ 826 writel(0x00A1F037, ¯egs->hfdp); 827 828 /* Next lets configure the MAC Interface Control register */ 829 writel(0, ¯egs->if_ctrl); 830 831 writel(ET_MAC_MIIMGMT_CLK_RST, ¯egs->mii_mgmt_cfg); 832 833 /* Next lets configure the MAC Station Address register. These 834 * values are read from the EEPROM during initialization and stored 835 * in the adapter structure. We write what is stored in the adapter 836 * structure to the MAC Station Address registers high and low. This 837 * station address is used for generating and checking pause control 838 * packets. 
839 */ 840 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | 841 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); 842 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | 843 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | 844 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | 845 adapter->addr[2]; 846 writel(station1, ¯egs->station_addr_1); 847 writel(station2, ¯egs->station_addr_2); 848 849 /* Max ethernet packet in bytes that will be passed by the mac without 850 * being truncated. Allow the MAC to pass 4 more than our max packet 851 * size. This is 4 for the Ethernet CRC. 852 * 853 * Packets larger than (registry_jumbo_packet) that do not contain a 854 * VLAN ID will be dropped by the Rx function. 855 */ 856 writel(adapter->registry_jumbo_packet + 4, ¯egs->max_fm_len); 857 858 /* clear out MAC config reset */ 859 writel(0, ¯egs->cfg1); 860 } 861 862 static void et1310_config_mac_regs2(struct et131x_adapter *adapter) 863 { 864 int32_t delay = 0; 865 struct mac_regs __iomem *mac = &adapter->regs->mac; 866 struct phy_device *phydev = adapter->netdev->phydev; 867 u32 cfg1; 868 u32 cfg2; 869 u32 ifctrl; 870 u32 ctl; 871 872 ctl = readl(&adapter->regs->txmac.ctl); 873 cfg1 = readl(&mac->cfg1); 874 cfg2 = readl(&mac->cfg2); 875 ifctrl = readl(&mac->if_ctrl); 876 877 /* Set up the if mode bits */ 878 cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK; 879 if (phydev->speed == SPEED_1000) { 880 cfg2 |= ET_MAC_CFG2_IFMODE_1000; 881 ifctrl &= ~ET_MAC_IFCTRL_PHYMODE; 882 } else { 883 cfg2 |= ET_MAC_CFG2_IFMODE_100; 884 ifctrl |= ET_MAC_IFCTRL_PHYMODE; 885 } 886 887 cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE | 888 ET_MAC_CFG1_TX_FLOW; 889 890 cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW); 891 if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH) 892 cfg1 |= ET_MAC_CFG1_RX_FLOW; 893 writel(cfg1, &mac->cfg1); 894 895 /* Now we need to initialize the MAC Configuration 2 register */ 896 /* preamble 7, check length, huge 
frame off, pad crc, crc enable 897 * full duplex off 898 */ 899 cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT; 900 cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK; 901 cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC; 902 cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE; 903 cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME; 904 cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX; 905 906 if (phydev->duplex == DUPLEX_FULL) 907 cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX; 908 909 ifctrl &= ~ET_MAC_IFCTRL_GHDMODE; 910 if (phydev->duplex == DUPLEX_HALF) 911 ifctrl |= ET_MAC_IFCTRL_GHDMODE; 912 913 writel(ifctrl, &mac->if_ctrl); 914 writel(cfg2, &mac->cfg2); 915 916 do { 917 udelay(10); 918 delay++; 919 cfg1 = readl(&mac->cfg1); 920 } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100); 921 922 if (delay == 100) { 923 dev_warn(&adapter->pdev->dev, 924 "Syncd bits did not respond correctly cfg1 word 0x%08x\n", 925 cfg1); 926 } 927 928 ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE; 929 writel(ctl, &adapter->regs->txmac.ctl); 930 931 if (adapter->flags & FMP_ADAPTER_LOWER_POWER) { 932 et131x_rx_dma_enable(adapter); 933 et131x_tx_dma_enable(adapter); 934 } 935 } 936 937 static int et1310_in_phy_coma(struct et131x_adapter *adapter) 938 { 939 u32 pmcsr = readl(&adapter->regs->global.pm_csr); 940 941 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; 942 } 943 944 static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) 945 { 946 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 947 u32 hash1 = 0; 948 u32 hash2 = 0; 949 u32 hash3 = 0; 950 u32 hash4 = 0; 951 u32 pm_csr; 952 953 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision 954 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not 955 * specified) then we should pass NO multi-cast addresses to the 956 * driver. 
957 */ 958 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { 959 int i; 960 961 /* Loop through our multicast array and set up the device */ 962 for (i = 0; i < adapter->multicast_addr_count; i++) { 963 u32 result; 964 965 result = ether_crc(6, adapter->multicast_list[i]); 966 967 result = (result & 0x3F800000) >> 23; 968 969 if (result < 32) { 970 hash1 |= (1 << result); 971 } else if ((31 < result) && (result < 64)) { 972 result -= 32; 973 hash2 |= (1 << result); 974 } else if ((63 < result) && (result < 96)) { 975 result -= 64; 976 hash3 |= (1 << result); 977 } else { 978 result -= 96; 979 hash4 |= (1 << result); 980 } 981 } 982 } 983 984 /* Write out the new hash to the device */ 985 pm_csr = readl(&adapter->regs->global.pm_csr); 986 if (!et1310_in_phy_coma(adapter)) { 987 writel(hash1, &rxmac->multi_hash1); 988 writel(hash2, &rxmac->multi_hash2); 989 writel(hash3, &rxmac->multi_hash3); 990 writel(hash4, &rxmac->multi_hash4); 991 } 992 } 993 994 static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) 995 { 996 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 997 u32 uni_pf1; 998 u32 uni_pf2; 999 u32 uni_pf3; 1000 u32 pm_csr; 1001 1002 /* Set up unicast packet filter reg 3 to be the first two octets of 1003 * the MAC address for both address 1004 * 1005 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the 1006 * MAC address for second address 1007 * 1008 * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the 1009 * MAC address for first address 1010 */ 1011 uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) | 1012 (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) | 1013 (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) | 1014 adapter->addr[1]; 1015 1016 uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) | 1017 (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) | 1018 (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) | 1019 adapter->addr[5]; 1020 1021 uni_pf1 = (adapter->addr[2] << 
		   ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	/* NOTE(review): pm_csr is read but never used; only the coma check
	 * below gates the writes — confirm the readl can be dropped.
	 */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

/* et1310_config_rxmac_regs - configure the receive MAC: WOL, address
 * filters, packet filter control and the MAC-to-DMA interface.
 */
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->netdev->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);
	writel(0, &rxmac->mif_ctrl);
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *	   errors during the reception of the packet. Clear this
	 *	   bit in Gigabit, set it in 100Mbit. This was derived
	 *	   experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *	   internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *	   field value in the packet does not match the actual data
	 *	   byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
1134 */ 1135 writel(pf_ctrl, &rxmac->pf_ctrl); 1136 writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl); 1137 } 1138 1139 static void et1310_config_txmac_regs(struct et131x_adapter *adapter) 1140 { 1141 struct txmac_regs __iomem *txmac = &adapter->regs->txmac; 1142 1143 /* We need to update the Control Frame Parameters 1144 * cfpt - control frame pause timer set to 64 (0x40) 1145 * cfep - control frame extended pause timer set to 0x0 1146 */ 1147 if (adapter->flow == FLOW_NONE) 1148 writel(0, &txmac->cf_param); 1149 else 1150 writel(0x40, &txmac->cf_param); 1151 } 1152 1153 static void et1310_config_macstat_regs(struct et131x_adapter *adapter) 1154 { 1155 struct macstat_regs __iomem *macstat = &adapter->regs->macstat; 1156 u32 __iomem *reg; 1157 1158 /* initialize all the macstat registers to zero on the device */ 1159 for (reg = &macstat->txrx_0_64_byte_frames; 1160 reg <= &macstat->carry_reg2; reg++) 1161 writel(0, reg); 1162 1163 /* Unmask any counters that we want to track the overflow of. 1164 * Initially this will be all counters. It may become clear later 1165 * that we do not need to track all counters. 
1166 */ 1167 writel(0xFFFFBE32, &macstat->carry_reg1_mask); 1168 writel(0xFFFE7E8B, &macstat->carry_reg2_mask); 1169 } 1170 1171 static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, 1172 u8 reg, u16 *value) 1173 { 1174 struct mac_regs __iomem *mac = &adapter->regs->mac; 1175 int status = 0; 1176 u32 delay = 0; 1177 u32 mii_addr; 1178 u32 mii_cmd; 1179 u32 mii_indicator; 1180 1181 /* Save a local copy of the registers we are dealing with so we can 1182 * set them back 1183 */ 1184 mii_addr = readl(&mac->mii_mgmt_addr); 1185 mii_cmd = readl(&mac->mii_mgmt_cmd); 1186 1187 /* Stop the current operation */ 1188 writel(0, &mac->mii_mgmt_cmd); 1189 1190 /* Set up the register we need to read from on the correct PHY */ 1191 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 1192 1193 writel(0x1, &mac->mii_mgmt_cmd); 1194 1195 do { 1196 udelay(50); 1197 delay++; 1198 mii_indicator = readl(&mac->mii_mgmt_indicator); 1199 } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); 1200 1201 /* If we hit the max delay, we could not read the register */ 1202 if (delay == 50) { 1203 dev_warn(&adapter->pdev->dev, 1204 "reg 0x%08x could not be read\n", reg); 1205 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1206 mii_indicator); 1207 1208 status = -EIO; 1209 goto out; 1210 } 1211 1212 /* If we hit here we were able to read the register and we need to 1213 * return the value to the caller 1214 */ 1215 *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; 1216 1217 out: 1218 /* Stop the read operation */ 1219 writel(0, &mac->mii_mgmt_cmd); 1220 1221 /* set the registers we touched back to the state at which we entered 1222 * this function 1223 */ 1224 writel(mii_addr, &mac->mii_mgmt_addr); 1225 writel(mii_cmd, &mac->mii_mgmt_cmd); 1226 1227 return status; 1228 } 1229 1230 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) 1231 { 1232 struct phy_device *phydev = adapter->netdev->phydev; 1233 1234 if 
(!phydev) 1235 return -EIO; 1236 1237 return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value); 1238 } 1239 1240 static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, 1241 u16 value) 1242 { 1243 struct mac_regs __iomem *mac = &adapter->regs->mac; 1244 int status = 0; 1245 u32 delay = 0; 1246 u32 mii_addr; 1247 u32 mii_cmd; 1248 u32 mii_indicator; 1249 1250 /* Save a local copy of the registers we are dealing with so we can 1251 * set them back 1252 */ 1253 mii_addr = readl(&mac->mii_mgmt_addr); 1254 mii_cmd = readl(&mac->mii_mgmt_cmd); 1255 1256 /* Stop the current operation */ 1257 writel(0, &mac->mii_mgmt_cmd); 1258 1259 /* Set up the register we need to write to on the correct PHY */ 1260 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 1261 1262 /* Add the value to write to the registers to the mac */ 1263 writel(value, &mac->mii_mgmt_ctrl); 1264 1265 do { 1266 udelay(50); 1267 delay++; 1268 mii_indicator = readl(&mac->mii_mgmt_indicator); 1269 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); 1270 1271 /* If we hit the max delay, we could not write the register */ 1272 if (delay == 100) { 1273 u16 tmp; 1274 1275 dev_warn(&adapter->pdev->dev, 1276 "reg 0x%08x could not be written", reg); 1277 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1278 mii_indicator); 1279 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", 1280 readl(&mac->mii_mgmt_cmd)); 1281 1282 et131x_mii_read(adapter, reg, &tmp); 1283 1284 status = -EIO; 1285 } 1286 /* Stop the write operation */ 1287 writel(0, &mac->mii_mgmt_cmd); 1288 1289 /* set the registers we touched back to the state at which we entered 1290 * this function 1291 */ 1292 writel(mii_addr, &mac->mii_mgmt_addr); 1293 writel(mii_cmd, &mac->mii_mgmt_cmd); 1294 1295 return status; 1296 } 1297 1298 static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, 1299 u16 regnum, 1300 u16 bitnum, 1301 u8 *value) 1302 { 1303 u16 reg; 1304 u16 mask = 1 << bitnum; 1305 1306 
et131x_mii_read(adapter, regnum, ®); 1307 1308 *value = (reg & mask) >> bitnum; 1309 } 1310 1311 static void et1310_config_flow_control(struct et131x_adapter *adapter) 1312 { 1313 struct phy_device *phydev = adapter->netdev->phydev; 1314 1315 if (phydev->duplex == DUPLEX_HALF) { 1316 adapter->flow = FLOW_NONE; 1317 } else { 1318 char remote_pause, remote_async_pause; 1319 1320 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); 1321 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); 1322 1323 if (remote_pause && remote_async_pause) { 1324 adapter->flow = adapter->wanted_flow; 1325 } else if (remote_pause && !remote_async_pause) { 1326 if (adapter->wanted_flow == FLOW_BOTH) 1327 adapter->flow = FLOW_BOTH; 1328 else 1329 adapter->flow = FLOW_NONE; 1330 } else if (!remote_pause && !remote_async_pause) { 1331 adapter->flow = FLOW_NONE; 1332 } else { 1333 if (adapter->wanted_flow == FLOW_BOTH) 1334 adapter->flow = FLOW_RXONLY; 1335 else 1336 adapter->flow = FLOW_NONE; 1337 } 1338 } 1339 } 1340 1341 /* et1310_update_macstat_host_counters - Update local copy of the statistics */ 1342 static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) 1343 { 1344 struct ce_stats *stats = &adapter->stats; 1345 struct macstat_regs __iomem *macstat = 1346 &adapter->regs->macstat; 1347 1348 stats->tx_collisions += readl(&macstat->tx_total_collisions); 1349 stats->tx_first_collisions += readl(&macstat->tx_single_collisions); 1350 stats->tx_deferred += readl(&macstat->tx_deferred); 1351 stats->tx_excessive_collisions += 1352 readl(&macstat->tx_multiple_collisions); 1353 stats->tx_late_collisions += readl(&macstat->tx_late_collisions); 1354 stats->tx_underflows += readl(&macstat->tx_undersize_frames); 1355 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); 1356 1357 stats->rx_align_errs += readl(&macstat->rx_align_errs); 1358 stats->rx_crc_errs += readl(&macstat->rx_code_errs); 1359 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); 1360 
stats->rx_overflows += readl(&macstat->rx_oversize_packets); 1361 stats->rx_code_violations += readl(&macstat->rx_fcs_errs); 1362 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); 1363 stats->rx_other_errs += readl(&macstat->rx_fragment_packets); 1364 } 1365 1366 /* et1310_handle_macstat_interrupt 1367 * 1368 * One of the MACSTAT counters has wrapped. Update the local copy of 1369 * the statistics held in the adapter structure, checking the "wrap" 1370 * bit for each counter. 1371 */ 1372 static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) 1373 { 1374 u32 carry_reg1; 1375 u32 carry_reg2; 1376 1377 /* Read the interrupt bits from the register(s). These are Clear On 1378 * Write. 1379 */ 1380 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); 1381 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); 1382 1383 writel(carry_reg1, &adapter->regs->macstat.carry_reg1); 1384 writel(carry_reg2, &adapter->regs->macstat.carry_reg2); 1385 1386 /* We need to do update the host copy of all the MAC_STAT counters. 1387 * For each counter, check it's overflow bit. If the overflow bit is 1388 * set, then increment the host version of the count by one complete 1389 * revolution of the counter. This routine is called when the counter 1390 * block indicates that one of the counters has wrapped. 
1391 */ 1392 if (carry_reg1 & (1 << 14)) 1393 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; 1394 if (carry_reg1 & (1 << 8)) 1395 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; 1396 if (carry_reg1 & (1 << 7)) 1397 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; 1398 if (carry_reg1 & (1 << 2)) 1399 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; 1400 if (carry_reg1 & (1 << 6)) 1401 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; 1402 if (carry_reg1 & (1 << 3)) 1403 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; 1404 if (carry_reg1 & (1 << 0)) 1405 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; 1406 if (carry_reg2 & (1 << 16)) 1407 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; 1408 if (carry_reg2 & (1 << 15)) 1409 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; 1410 if (carry_reg2 & (1 << 6)) 1411 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; 1412 if (carry_reg2 & (1 << 8)) 1413 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; 1414 if (carry_reg2 & (1 << 5)) 1415 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; 1416 if (carry_reg2 & (1 << 4)) 1417 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; 1418 if (carry_reg2 & (1 << 2)) 1419 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; 1420 } 1421 1422 static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) 1423 { 1424 struct net_device *netdev = bus->priv; 1425 struct et131x_adapter *adapter = netdev_priv(netdev); 1426 u16 value; 1427 int ret; 1428 1429 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); 1430 1431 if (ret < 0) 1432 return ret; 1433 1434 return value; 1435 } 1436 1437 static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, 1438 int reg, u16 value) 1439 { 1440 struct net_device *netdev = bus->priv; 1441 struct et131x_adapter *adapter = netdev_priv(netdev); 1442 1443 return et131x_mii_write(adapter, phy_addr, reg, value); 1444 } 1445 1446 /* et1310_phy_power_switch - PHY power 
control 1447 * @adapter: device to control 1448 * @down: true for off/false for back on 1449 * 1450 * one hundred, ten, one thousand megs 1451 * How would you like to have your LAN accessed 1452 * Can't you see that this code processed 1453 * Phy power, phy power.. 1454 */ 1455 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) 1456 { 1457 u16 data; 1458 struct phy_device *phydev = adapter->netdev->phydev; 1459 1460 et131x_mii_read(adapter, MII_BMCR, &data); 1461 data &= ~BMCR_PDOWN; 1462 if (down) 1463 data |= BMCR_PDOWN; 1464 et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data); 1465 } 1466 1467 /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ 1468 static void et131x_xcvr_init(struct et131x_adapter *adapter) 1469 { 1470 u16 lcr2; 1471 struct phy_device *phydev = adapter->netdev->phydev; 1472 1473 /* Set the LED behavior such that LED 1 indicates speed (off = 1474 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates 1475 * link and activity (on for link, blink off for activity). 1476 * 1477 * NOTE: Some customizations have been added here for specific 1478 * vendors; The LED behavior is now determined by vendor data in the 1479 * EEPROM. However, the above description is the default. 
1480 */ 1481 if ((adapter->eeprom_data[1] & 0x4) == 0) { 1482 et131x_mii_read(adapter, PHY_LED_2, &lcr2); 1483 1484 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); 1485 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); 1486 1487 if ((adapter->eeprom_data[1] & 0x8) == 0) 1488 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); 1489 else 1490 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); 1491 1492 et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2); 1493 } 1494 } 1495 1496 /* et131x_configure_global_regs - configure JAGCore global regs */ 1497 static void et131x_configure_global_regs(struct et131x_adapter *adapter) 1498 { 1499 struct global_regs __iomem *regs = &adapter->regs->global; 1500 1501 writel(0, ®s->rxq_start_addr); 1502 writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); 1503 1504 if (adapter->registry_jumbo_packet < 2048) { 1505 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word 1506 * block of RAM that the driver can split between Tx 1507 * and Rx as it desires. Our default is to split it 1508 * 50/50: 1509 */ 1510 writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); 1511 writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); 1512 } else if (adapter->registry_jumbo_packet < 8192) { 1513 /* For jumbo packets > 2k but < 8k, split 50-50. */ 1514 writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); 1515 writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); 1516 } else { 1517 /* 9216 is the only packet size greater than 8k that 1518 * is available. The Tx buffer has to be big enough 1519 * for one whole packet on the Tx side. We'll make 1520 * the Tx 9408, and give the rest to Rx 1521 */ 1522 writel(0x01b3, ®s->rxq_end_addr); 1523 writel(0x01b4, ®s->txq_start_addr); 1524 } 1525 1526 /* Initialize the loopback register. Disable all loopbacks. */ 1527 writel(0, ®s->loopback); 1528 1529 writel(0, ®s->msi_config); 1530 1531 /* By default, disable the watchdog timer. It will be enabled when 1532 * a packet is queued. 
1533 */ 1534 writel(0, ®s->watchdog_timer); 1535 } 1536 1537 /* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ 1538 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) 1539 { 1540 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 1541 struct rx_ring *rx_local = &adapter->rx_ring; 1542 struct fbr_desc *fbr_entry; 1543 u32 entry; 1544 u32 psr_num_des; 1545 unsigned long flags; 1546 u8 id; 1547 1548 et131x_rx_dma_disable(adapter); 1549 1550 /* Load the completion writeback physical address */ 1551 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi); 1552 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo); 1553 1554 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); 1555 1556 /* Set the address and parameters of the packet status ring */ 1557 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi); 1558 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo); 1559 writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des); 1560 writel(0, &rx_dma->psr_full_offset); 1561 1562 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK; 1563 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, 1564 &rx_dma->psr_min_des); 1565 1566 spin_lock_irqsave(&adapter->rcv_lock, flags); 1567 1568 /* These local variables track the PSR in the adapter structure */ 1569 rx_local->local_psr_full = 0; 1570 1571 for (id = 0; id < NUM_FBRS; id++) { 1572 u32 __iomem *num_des; 1573 u32 __iomem *full_offset; 1574 u32 __iomem *min_des; 1575 u32 __iomem *base_hi; 1576 u32 __iomem *base_lo; 1577 struct fbr_lookup *fbr = rx_local->fbr[id]; 1578 1579 if (id == 0) { 1580 num_des = &rx_dma->fbr0_num_des; 1581 full_offset = &rx_dma->fbr0_full_offset; 1582 min_des = &rx_dma->fbr0_min_des; 1583 base_hi = &rx_dma->fbr0_base_hi; 1584 base_lo = &rx_dma->fbr0_base_lo; 1585 } else { 1586 num_des = &rx_dma->fbr1_num_des; 1587 full_offset = &rx_dma->fbr1_full_offset; 
1588 min_des = &rx_dma->fbr1_min_des; 1589 base_hi = &rx_dma->fbr1_base_hi; 1590 base_lo = &rx_dma->fbr1_base_lo; 1591 } 1592 1593 /* Now's the best time to initialize FBR contents */ 1594 fbr_entry = fbr->ring_virtaddr; 1595 for (entry = 0; entry < fbr->num_entries; entry++) { 1596 fbr_entry->addr_hi = fbr->bus_high[entry]; 1597 fbr_entry->addr_lo = fbr->bus_low[entry]; 1598 fbr_entry->word2 = entry; 1599 fbr_entry++; 1600 } 1601 1602 /* Set the address and parameters of Free buffer ring 1 and 0 */ 1603 writel(upper_32_bits(fbr->ring_physaddr), base_hi); 1604 writel(lower_32_bits(fbr->ring_physaddr), base_lo); 1605 writel(fbr->num_entries - 1, num_des); 1606 writel(ET_DMA10_WRAP, full_offset); 1607 1608 /* This variable tracks the free buffer ring 1 full position, 1609 * so it has to match the above. 1610 */ 1611 fbr->local_full = ET_DMA10_WRAP; 1612 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 1613 min_des); 1614 } 1615 1616 /* Program the number of packets we will receive before generating an 1617 * interrupt. 1618 * For version B silicon, this value gets updated once autoneg is 1619 *complete. 1620 */ 1621 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); 1622 1623 /* The "time_done" is not working correctly to coalesce interrupts 1624 * after a given time period, but rather is giving us an interrupt 1625 * regardless of whether we have received packets. 1626 * This value gets updated once autoneg is complete. 1627 */ 1628 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); 1629 1630 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 1631 } 1632 1633 /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. 1634 * 1635 * Configure the transmit engine with the ring buffers we have created 1636 * and prepare it for use. 
1637 */ 1638 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) 1639 { 1640 struct txdma_regs __iomem *txdma = &adapter->regs->txdma; 1641 struct tx_ring *tx_ring = &adapter->tx_ring; 1642 1643 /* Load the hardware with the start of the transmit descriptor ring. */ 1644 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); 1645 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); 1646 1647 /* Initialise the transmit DMA engine */ 1648 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); 1649 1650 /* Load the completion writeback physical address */ 1651 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); 1652 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); 1653 1654 *tx_ring->tx_status = 0; 1655 1656 writel(0, &txdma->service_request); 1657 tx_ring->send_idx = 0; 1658 } 1659 1660 /* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ 1661 static void et131x_adapter_setup(struct et131x_adapter *adapter) 1662 { 1663 et131x_configure_global_regs(adapter); 1664 et1310_config_mac_regs1(adapter); 1665 1666 /* Configure the MMC registers */ 1667 /* All we need to do is initialize the Memory Control Register */ 1668 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); 1669 1670 et1310_config_rxmac_regs(adapter); 1671 et1310_config_txmac_regs(adapter); 1672 1673 et131x_config_rx_dma_regs(adapter); 1674 et131x_config_tx_dma_regs(adapter); 1675 1676 et1310_config_macstat_regs(adapter); 1677 1678 et1310_phy_power_switch(adapter, 0); 1679 et131x_xcvr_init(adapter); 1680 } 1681 1682 /* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ 1683 static void et131x_soft_reset(struct et131x_adapter *adapter) 1684 { 1685 u32 reg; 1686 1687 /* Disable MAC Core */ 1688 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | 1689 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 1690 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; 1691 writel(reg, 
&adapter->regs->mac.cfg1); 1692 1693 reg = ET_RESET_ALL; 1694 writel(reg, &adapter->regs->global.sw_reset); 1695 1696 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 1697 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; 1698 writel(reg, &adapter->regs->mac.cfg1); 1699 writel(0, &adapter->regs->mac.cfg1); 1700 } 1701 1702 static void et131x_enable_interrupts(struct et131x_adapter *adapter) 1703 { 1704 u32 mask; 1705 1706 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) 1707 mask = INT_MASK_ENABLE; 1708 else 1709 mask = INT_MASK_ENABLE_NO_FLOW; 1710 1711 writel(mask, &adapter->regs->global.int_mask); 1712 } 1713 1714 static void et131x_disable_interrupts(struct et131x_adapter *adapter) 1715 { 1716 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); 1717 } 1718 1719 static void et131x_tx_dma_disable(struct et131x_adapter *adapter) 1720 { 1721 /* Setup the transmit dma configuration register */ 1722 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, 1723 &adapter->regs->txdma.csr); 1724 } 1725 1726 static void et131x_enable_txrx(struct net_device *netdev) 1727 { 1728 struct et131x_adapter *adapter = netdev_priv(netdev); 1729 1730 et131x_rx_dma_enable(adapter); 1731 et131x_tx_dma_enable(adapter); 1732 1733 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) 1734 et131x_enable_interrupts(adapter); 1735 1736 netif_start_queue(netdev); 1737 } 1738 1739 static void et131x_disable_txrx(struct net_device *netdev) 1740 { 1741 struct et131x_adapter *adapter = netdev_priv(netdev); 1742 1743 netif_stop_queue(netdev); 1744 1745 et131x_rx_dma_disable(adapter); 1746 et131x_tx_dma_disable(adapter); 1747 1748 et131x_disable_interrupts(adapter); 1749 } 1750 1751 static void et131x_init_send(struct et131x_adapter *adapter) 1752 { 1753 int i; 1754 struct tx_ring *tx_ring = &adapter->tx_ring; 1755 struct tcb *tcb = tx_ring->tcb_ring; 1756 1757 tx_ring->tcb_qhead = tcb; 1758 1759 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); 1760 1761 for (i = 0; i < NUM_TCB; 
i++) { 1762 tcb->next = tcb + 1; 1763 tcb++; 1764 } 1765 1766 tcb--; 1767 tx_ring->tcb_qtail = tcb; 1768 tcb->next = NULL; 1769 /* Curr send queue should now be empty */ 1770 tx_ring->send_head = NULL; 1771 tx_ring->send_tail = NULL; 1772 } 1773 1774 /* et1310_enable_phy_coma 1775 * 1776 * driver receive an phy status change interrupt while in D0 and check that 1777 * phy_status is down. 1778 * 1779 * -- gate off JAGCore; 1780 * -- set gigE PHY in Coma mode 1781 * -- wake on phy_interrupt; Perform software reset JAGCore, 1782 * re-initialize jagcore and gigE PHY 1783 */ 1784 static void et1310_enable_phy_coma(struct et131x_adapter *adapter) 1785 { 1786 u32 pmcsr = readl(&adapter->regs->global.pm_csr); 1787 1788 /* Stop sending packets. */ 1789 adapter->flags |= FMP_ADAPTER_LOWER_POWER; 1790 1791 /* Wait for outstanding Receive packets */ 1792 et131x_disable_txrx(adapter->netdev); 1793 1794 /* Gate off JAGCore 3 clock domains */ 1795 pmcsr &= ~ET_PMCSR_INIT; 1796 writel(pmcsr, &adapter->regs->global.pm_csr); 1797 1798 /* Program gigE PHY in to Coma mode */ 1799 pmcsr |= ET_PM_PHY_SW_COMA; 1800 writel(pmcsr, &adapter->regs->global.pm_csr); 1801 } 1802 1803 static void et1310_disable_phy_coma(struct et131x_adapter *adapter) 1804 { 1805 u32 pmcsr; 1806 1807 pmcsr = readl(&adapter->regs->global.pm_csr); 1808 1809 /* Disable phy_sw_coma register and re-enable JAGCore clocks */ 1810 pmcsr |= ET_PMCSR_INIT; 1811 pmcsr &= ~ET_PM_PHY_SW_COMA; 1812 writel(pmcsr, &adapter->regs->global.pm_csr); 1813 1814 /* Restore the GbE PHY speed and duplex modes; 1815 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY 1816 */ 1817 1818 /* Re-initialize the send structures */ 1819 et131x_init_send(adapter); 1820 1821 /* Bring the device back to the state it was during init prior to 1822 * autonegotiation being complete. This way, when we get the auto-neg 1823 * complete interrupt, we can complete init by calling ConfigMacREGS2. 
1824 */ 1825 et131x_soft_reset(adapter); 1826 1827 et131x_adapter_setup(adapter); 1828 1829 /* Allow Tx to restart */ 1830 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; 1831 1832 et131x_enable_txrx(adapter->netdev); 1833 } 1834 1835 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 1836 { 1837 u32 tmp_free_buff_ring = *free_buff_ring; 1838 1839 tmp_free_buff_ring++; 1840 /* This works for all cases where limit < 1024. The 1023 case 1841 * works because 1023++ is 1024 which means the if condition is not 1842 * taken but the carry of the bit into the wrap bit toggles the wrap 1843 * value correctly 1844 */ 1845 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { 1846 tmp_free_buff_ring &= ~ET_DMA10_MASK; 1847 tmp_free_buff_ring ^= ET_DMA10_WRAP; 1848 } 1849 /* For the 1023 case */ 1850 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); 1851 *free_buff_ring = tmp_free_buff_ring; 1852 return tmp_free_buff_ring; 1853 } 1854 1855 /* et131x_rx_dma_memory_alloc 1856 * 1857 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, 1858 * and the Packet Status Ring. 1859 */ 1860 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) 1861 { 1862 u8 id; 1863 u32 i, j; 1864 u32 bufsize; 1865 u32 psr_size; 1866 u32 fbr_chunksize; 1867 struct rx_ring *rx_ring = &adapter->rx_ring; 1868 struct fbr_lookup *fbr; 1869 1870 /* Alloc memory for the lookup table */ 1871 rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL); 1872 if (rx_ring->fbr[0] == NULL) 1873 return -ENOMEM; 1874 rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL); 1875 if (rx_ring->fbr[1] == NULL) 1876 return -ENOMEM; 1877 1878 /* The first thing we will do is configure the sizes of the buffer 1879 * rings. These will change based on jumbo packet support. Larger 1880 * jumbo packets increases the size of each entry in FBR0, and the 1881 * number of entries in FBR0, while at the same time decreasing the 1882 * number of entries in FBR1. 
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have less entries. Conversely, FBR1 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant size - when its size doubles
	 * the number of entries halves. FBR0 increases in size, however.
	 */
	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	/* One packet-status entry per free buffer across both rings */
	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		/* Allocate an area of memory for Free Buffer Ring */
		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	/* Allocate the actual packet buffers in FBR_CHUNKS-sized slabs */
	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			/* See NOTE in "Save Physical Address" comment above */
			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				/* Save the Virtual address of this index for
				 * quick access later
				 */
				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				/* now store the physical address in the
				 * descriptor so the device can access it
				 */
				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}

	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
1999 */ 2000 INIT_LIST_HEAD(&rx_ring->recv_list); 2001 return 0; 2002 } 2003 2004 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 2005 { 2006 u8 id; 2007 u32 ii; 2008 u32 bufsize; 2009 u32 psr_size; 2010 struct rfd *rfd; 2011 struct rx_ring *rx_ring = &adapter->rx_ring; 2012 struct fbr_lookup *fbr; 2013 2014 /* Free RFDs and associated packet descriptors */ 2015 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 2016 2017 while (!list_empty(&rx_ring->recv_list)) { 2018 rfd = list_entry(rx_ring->recv_list.next, 2019 struct rfd, list_node); 2020 2021 list_del(&rfd->list_node); 2022 rfd->skb = NULL; 2023 kfree(rfd); 2024 } 2025 2026 /* Free Free Buffer Rings */ 2027 for (id = 0; id < NUM_FBRS; id++) { 2028 fbr = rx_ring->fbr[id]; 2029 2030 if (!fbr || !fbr->ring_virtaddr) 2031 continue; 2032 2033 /* First the packet memory */ 2034 for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) { 2035 if (fbr->mem_virtaddrs[ii]) { 2036 bufsize = fbr->buffsize * FBR_CHUNKS; 2037 2038 dma_free_coherent(&adapter->pdev->dev, 2039 bufsize, 2040 fbr->mem_virtaddrs[ii], 2041 fbr->mem_physaddrs[ii]); 2042 2043 fbr->mem_virtaddrs[ii] = NULL; 2044 } 2045 } 2046 2047 bufsize = sizeof(struct fbr_desc) * fbr->num_entries; 2048 2049 dma_free_coherent(&adapter->pdev->dev, 2050 bufsize, 2051 fbr->ring_virtaddr, 2052 fbr->ring_physaddr); 2053 2054 fbr->ring_virtaddr = NULL; 2055 } 2056 2057 /* Free Packet Status Ring */ 2058 if (rx_ring->ps_ring_virtaddr) { 2059 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; 2060 2061 dma_free_coherent(&adapter->pdev->dev, psr_size, 2062 rx_ring->ps_ring_virtaddr, 2063 rx_ring->ps_ring_physaddr); 2064 2065 rx_ring->ps_ring_virtaddr = NULL; 2066 } 2067 2068 /* Free area of memory for the writeback of status information */ 2069 if (rx_ring->rx_status_block) { 2070 dma_free_coherent(&adapter->pdev->dev, 2071 sizeof(struct rx_status_block), 2072 rx_ring->rx_status_block, 2073 rx_ring->rx_status_bus); 2074 
		rx_ring->rx_status_block = NULL;
	}

	/* Free the FBR Lookup Table */
	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}

/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		/* NOTE(review): GFP_ATOMIC | GFP_DMA looks stricter than a
		 * plain list node needs - confirm the calling context before
		 * relaxing it.
		 */
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		/* Add this RFD to the recv_list */
		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		/* Increment the available RFD's */
		rx_ring->num_ready_recv++;
	}

	return 0;
}

/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
	 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

/* nic_return_rfd - Recycle a RFD and put it back onto the receive list.
 * Also hands the RFD's free buffer back to the hardware by advancing the
 * matching Free Buffer Ring's full offset.
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		/* Handle the Free Buffer Ring advancement here. Write
		 * the PA / Buffer Index for the returned buffer into
		 * the oldest (next to be freed) FBR entry
		 */
		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

/* nic_rx_pkts - Checks the hardware for available packets
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
2183 */ 2184 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 2185 { 2186 struct rx_ring *rx_local = &adapter->rx_ring; 2187 struct rx_status_block *status; 2188 struct pkt_stat_desc *psr; 2189 struct rfd *rfd; 2190 unsigned long flags; 2191 struct list_head *element; 2192 u8 ring_index; 2193 u16 buff_index; 2194 u32 len; 2195 u32 word0; 2196 u32 word1; 2197 struct sk_buff *skb; 2198 struct fbr_lookup *fbr; 2199 2200 /* RX Status block is written by the DMA engine prior to every 2201 * interrupt. It contains the next to be used entry in the Packet 2202 * Status Ring, and also the two Free Buffer rings. 2203 */ 2204 status = rx_local->rx_status_block; 2205 word1 = status->word1 >> 16; 2206 2207 /* Check the PSR and wrap bits do not match */ 2208 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 2209 return NULL; /* Looks like this ring is not updated yet */ 2210 2211 /* The packet status ring indicates that data is available. */ 2212 psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) + 2213 (rx_local->local_psr_full & 0xFFF); 2214 2215 /* Grab any information that is required once the PSR is advanced, 2216 * since we can no longer rely on the memory being accurate 2217 */ 2218 len = psr->word1 & 0xFFFF; 2219 ring_index = (psr->word1 >> 26) & 0x03; 2220 fbr = rx_local->fbr[ring_index]; 2221 buff_index = (psr->word1 >> 16) & 0x3FF; 2222 word0 = psr->word0; 2223 2224 /* Indicate that we have used this PSR entry. 
*/ 2225 /* FIXME wrap 12 */ 2226 add_12bit(&rx_local->local_psr_full, 1); 2227 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) { 2228 /* Clear psr full and toggle the wrap bit */ 2229 rx_local->local_psr_full &= ~0xFFF; 2230 rx_local->local_psr_full ^= 0x1000; 2231 } 2232 2233 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); 2234 2235 if (ring_index > 1 || buff_index > fbr->num_entries - 1) { 2236 /* Illegal buffer or ring index cannot be used by S/W*/ 2237 dev_err(&adapter->pdev->dev, 2238 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", 2239 rx_local->local_psr_full & 0xFFF, len, buff_index); 2240 return NULL; 2241 } 2242 2243 /* Get and fill the RFD. */ 2244 spin_lock_irqsave(&adapter->rcv_lock, flags); 2245 2246 element = rx_local->recv_list.next; 2247 rfd = list_entry(element, struct rfd, list_node); 2248 2249 if (!rfd) { 2250 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2251 return NULL; 2252 } 2253 2254 list_del(&rfd->list_node); 2255 rx_local->num_ready_recv--; 2256 2257 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2258 2259 rfd->bufferindex = buff_index; 2260 rfd->ringindex = ring_index; 2261 2262 /* In V1 silicon, there is a bug which screws up filtering of runt 2263 * packets. Therefore runt packet filtering is disabled in the MAC and 2264 * the packets are dropped here. They are also counted here. 
2265 */ 2266 if (len < (NIC_MIN_PACKET_SIZE + 4)) { 2267 adapter->stats.rx_other_errs++; 2268 rfd->len = 0; 2269 goto out; 2270 } 2271 2272 if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) 2273 adapter->stats.multicast_pkts_rcvd++; 2274 2275 rfd->len = len; 2276 2277 skb = dev_alloc_skb(rfd->len + 2); 2278 if (!skb) 2279 return NULL; 2280 2281 adapter->netdev->stats.rx_bytes += rfd->len; 2282 2283 memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); 2284 2285 skb->protocol = eth_type_trans(skb, adapter->netdev); 2286 skb->ip_summed = CHECKSUM_NONE; 2287 netif_receive_skb(skb); 2288 2289 out: 2290 nic_return_rfd(adapter, rfd); 2291 return rfd; 2292 } 2293 2294 static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget) 2295 { 2296 struct rfd *rfd = NULL; 2297 int count = 0; 2298 int limit = budget; 2299 bool done = true; 2300 struct rx_ring *rx_ring = &adapter->rx_ring; 2301 2302 if (budget > MAX_PACKETS_HANDLED) 2303 limit = MAX_PACKETS_HANDLED; 2304 2305 /* Process up to available RFD's */ 2306 while (count < limit) { 2307 if (list_empty(&rx_ring->recv_list)) { 2308 WARN_ON(rx_ring->num_ready_recv != 0); 2309 done = false; 2310 break; 2311 } 2312 2313 rfd = nic_rx_pkts(adapter); 2314 2315 if (rfd == NULL) 2316 break; 2317 2318 /* Do not receive any packets until a filter has been set. 2319 * Do not receive any packets until we have link. 2320 * If length is zero, return the RFD in order to advance the 2321 * Free buffer ring. 
		 */
		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->netdev->stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		/* More work pending: arm the watchdog so we get called back */
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		rx_ring->unfinished_receives = false;
	}

	return count;
}

/* et131x_tx_dma_memory_alloc
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 *
 * Returns 0 on success, -ENOMEM on failure. NOTE(review): error paths do
 * not undo earlier allocations - presumably the caller invokes
 * et131x_tx_dma_memory_free(); confirm against the probe/teardown paths.
 */
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCB's (Transmit Control Block) */
	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Single u32 the device writes back on transmit completion */
	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

/* et131x_tx_dma_memory_free - Undo et131x_tx_dma_memory_alloc(); safe to
 * call on a partially completed allocation (NULL checks throughout).
 */
static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);

		tx_ring->tx_status = NULL;
	}
	/* Free the memory for the tcb structures */
	kfree(tx_ring->tcb_ring);
}

/* nic_send_packet - NIC specific send handler for version B silicon.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->netdev->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 */

	/* nr_frags should be no more than 18. */
	BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if (skb_headlen(skb) <= 1514) {
				/* Low 16bits are length, high is vlan and
				 * unused currently so zero
				 */
				desc[frag].len_vlan = skb_headlen(skb);
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb),
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			} else {
				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;

				desc[frag].len_vlan = skb_headlen(skb) / 2;
				dma_addr = dma_map_single(&adapter->pdev->dev,
							  skb->data +
							  skb_headlen(skb) / 2,
							  skb_headlen(skb) / 2,
							  DMA_TO_DEVICE);
				desc[frag].addr_lo = lower_32_bits(dma_addr);
				desc[frag].addr_hi = upper_32_bits(dma_addr);
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
			frag++;
		}
	}

	/* At gigabit, interrupt only every PARM_TX_NUM_BUFS_DEF packets;
	 * at lower speeds interrupt on every packet.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags =
				TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
			tx_ring->since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
		}
	} else {
		desc[frag - 1].flags =
			TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
	}

	desc[0].flags |= TXDESC_FLAG_FIRSTPKT;

	tcb->index_start = tx_ring->send_idx;
	tcb->stale = 0;

	/* Copy the descriptors into the ring, wrapping to the start if the
	 * remaining contiguous space cannot hold them all.
	 */
	thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
	       desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&tx_ring->send_idx, thiscopy);

	if (INDEX10(tx_ring->send_idx) == 0 ||
	    INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
		tx_ring->send_idx &= ~ET_DMA10_MASK;
		tx_ring->send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(tx_ring->tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&tx_ring->send_idx, remainder);
	}

	/* Record the index of this packet's last descriptor in the TCB */
	if (INDEX10(tx_ring->send_idx) == 0) {
		if (tx_ring->send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else {
		tcb->index = tx_ring->send_idx - 1;
	}

	spin_lock(&adapter->tcb_send_qlock);

	if (tx_ring->send_tail)
		tx_ring->send_tail->next = tcb;
	else
		tx_ring->send_head = tcb;

	tx_ring->send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	tx_ring->used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	return 0;
}

/* send_packet - Claim a free TCB for @skb and hand it to the NIC send path.
 * On failure the TCB is returned to the ready queue. Returns 0 or -errno.
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb;
	unsigned long flags;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = tx_ring->tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	tx_ring->tcb_qhead = tcb->next;

	if (tx_ring->tcb_qhead == NULL)
		tx_ring->tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;
	tcb->next = NULL;

	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		/* Send failed: give the TCB back to the ready queue */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (tx_ring->tcb_qtail)
			tx_ring->tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty.
			 */
			tx_ring->tcb_qhead = tcb;

		tx_ring->tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(tx_ring->used > NUM_TCB);
	return 0;
}

/* free_send_packet - Recycle a struct tcb: unmap its descriptors, free the
 * skb, and put the zeroed TCB back on the ready queue.
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u64 dma_addr;

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and umap the fragments
		 * they point to
		 */
		do {
			desc = tx_ring->tx_desc_ring +
			       INDEX10(tcb->index_start);

			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	stats->tx_packets++;

	if (tx_ring->tcb_qtail)
		tx_ring->tcb_qtail->next = tcb;
	else /* Apparently ready Q is empty. */
		tx_ring->tcb_qhead = tcb;

	tx_ring->tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	/* NOTE(review): if tx_ring->used is an unsigned type this check can
	 * never fire - confirm the field's declaration.
	 */
	WARN_ON(tx_ring->used < 0);
}

/* et131x_free_busy_send_packets - Free and complete the stopped active sends */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		tx_ring->send_head = next;

		if (next == NULL)
			tx_ring->send_tail = NULL;

		tx_ring->used--;

		/* Drop the send-queue lock while recycling:
		 * free_send_packet takes the ready-queue lock itself.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = tx_ring->send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	tx_ring->used = 0;
}

/* et131x_handle_send_pkts
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 */
static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?
	 * Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = tx_ring->send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		/* Lock dropped around free_send_packet, which takes the
		 * ready-queue lock itself.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		tx_ring->used--;
		tx_ring->send_head = tcb->next;
		if (tcb->next == NULL)
			tx_ring->send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = tx_ring->send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (tx_ring->used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/* et131x_get_regs_len - ethtool hook: size in bytes of the register dump */
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}

/* et131x_get_regs - ethtool hook: dump PHY, global, TXDMA and RXDMA
 * registers into @regs_data. The order of the writes below defines the
 * dump layout - do not reorder.
 */
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;
	u16 tmp;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_BMSR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID2, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_LPA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_EXPANSION, &tmp);
	regs_buff[num++] = tmp;
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, &tmp);
	regs_buff[num++] = tmp;
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_CTRL1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_STAT1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0b, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0c, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ESTATUS, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_CONFIG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_2, &tmp);
	regs_buff[num++] = tmp;

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}

/* et131x_get_drvinfo - ethtool hook: report driver name/version/bus info */
static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops et131x_ethtool_ops = {
	.get_drvinfo	= et131x_get_drvinfo,
	.get_regs_len	= et131x_get_regs_len,
	.get_regs	= et131x_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/* et131x_hwaddr_init - set up the MAC Address */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If have our default mac from init and no mac address from
	 * EEPROM then we need to generate the last octet and set it on the
	 * device
	 */
	if (is_zero_ether_addr(adapter->rom_addr)) {
		/* We need to randomly generate the last octet so we
		 * decrease our chances of setting the mac address to
		 * same as another one of our cards in the system
		 */
get_random_bytes(&adapter->addr[5], 1); 2986 /* We have the default value in the register we are 2987 * working with so we need to copy the current 2988 * address into the permanent address 2989 */ 2990 ether_addr_copy(adapter->rom_addr, adapter->addr); 2991 } else { 2992 /* We do not have an override address, so set the 2993 * current address to the permanent address and add 2994 * it to the device 2995 */ 2996 ether_addr_copy(adapter->addr, adapter->rom_addr); 2997 } 2998 } 2999 3000 static int et131x_pci_init(struct et131x_adapter *adapter, 3001 struct pci_dev *pdev) 3002 { 3003 u16 max_payload; 3004 int i, rc; 3005 3006 rc = et131x_init_eeprom(adapter); 3007 if (rc < 0) 3008 goto out; 3009 3010 if (!pci_is_pcie(pdev)) { 3011 dev_err(&pdev->dev, "Missing PCIe capabilities\n"); 3012 goto err_out; 3013 } 3014 3015 /* Program the Ack/Nak latency and replay timers */ 3016 max_payload = pdev->pcie_mpss; 3017 3018 if (max_payload < 2) { 3019 static const u16 acknak[2] = { 0x76, 0xD0 }; 3020 static const u16 replay[2] = { 0x1E0, 0x2ED }; 3021 3022 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, 3023 acknak[max_payload])) { 3024 dev_err(&pdev->dev, 3025 "Could not write PCI config space for ACK/NAK\n"); 3026 goto err_out; 3027 } 3028 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, 3029 replay[max_payload])) { 3030 dev_err(&pdev->dev, 3031 "Could not write PCI config space for Replay Timer\n"); 3032 goto err_out; 3033 } 3034 } 3035 3036 /* l0s and l1 latency timers. We are using default values. 
3037 * Representing 001 for L0s and 010 for L1 3038 */ 3039 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { 3040 dev_err(&pdev->dev, 3041 "Could not write PCI config space for Latency Timers\n"); 3042 goto err_out; 3043 } 3044 3045 /* Change the max read size to 2k */ 3046 if (pcie_set_readrq(pdev, 2048)) { 3047 dev_err(&pdev->dev, 3048 "Couldn't change PCI config space for Max read size\n"); 3049 goto err_out; 3050 } 3051 3052 /* Get MAC address from config space if an eeprom exists, otherwise 3053 * the MAC address there will not be valid 3054 */ 3055 if (!adapter->has_eeprom) { 3056 et131x_hwaddr_init(adapter); 3057 return 0; 3058 } 3059 3060 for (i = 0; i < ETH_ALEN; i++) { 3061 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, 3062 adapter->rom_addr + i)) { 3063 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); 3064 goto err_out; 3065 } 3066 } 3067 ether_addr_copy(adapter->addr, adapter->rom_addr); 3068 out: 3069 return rc; 3070 err_out: 3071 rc = -EIO; 3072 goto out; 3073 } 3074 3075 /* et131x_error_timer_handler 3076 * @data: timer-specific variable; here a pointer to our adapter structure 3077 * 3078 * The routine called when the error timer expires, to track the number of 3079 * recurring errors. 3080 */ 3081 static void et131x_error_timer_handler(unsigned long data) 3082 { 3083 struct et131x_adapter *adapter = (struct et131x_adapter *)data; 3084 struct phy_device *phydev = adapter->netdev->phydev; 3085 3086 if (et1310_in_phy_coma(adapter)) { 3087 /* Bring the device immediately out of coma, to 3088 * prevent it from sleeping indefinitely, this 3089 * mechanism could be improved! 
3090 */ 3091 et1310_disable_phy_coma(adapter); 3092 adapter->boot_coma = 20; 3093 } else { 3094 et1310_update_macstat_host_counters(adapter); 3095 } 3096 3097 if (!phydev->link && adapter->boot_coma < 11) 3098 adapter->boot_coma++; 3099 3100 if (adapter->boot_coma == 10) { 3101 if (!phydev->link) { 3102 if (!et1310_in_phy_coma(adapter)) { 3103 /* NOTE - This was originally a 'sync with 3104 * interrupt'. How to do that under Linux? 3105 */ 3106 et131x_enable_interrupts(adapter); 3107 et1310_enable_phy_coma(adapter); 3108 } 3109 } 3110 } 3111 3112 /* This is a periodic timer, so reschedule */ 3113 mod_timer(&adapter->error_timer, jiffies + 3114 msecs_to_jiffies(TX_ERROR_PERIOD)); 3115 } 3116 3117 static void et131x_adapter_memory_free(struct et131x_adapter *adapter) 3118 { 3119 et131x_tx_dma_memory_free(adapter); 3120 et131x_rx_dma_memory_free(adapter); 3121 } 3122 3123 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) 3124 { 3125 int status; 3126 3127 status = et131x_tx_dma_memory_alloc(adapter); 3128 if (status) { 3129 dev_err(&adapter->pdev->dev, 3130 "et131x_tx_dma_memory_alloc FAILED\n"); 3131 et131x_tx_dma_memory_free(adapter); 3132 return status; 3133 } 3134 3135 status = et131x_rx_dma_memory_alloc(adapter); 3136 if (status) { 3137 dev_err(&adapter->pdev->dev, 3138 "et131x_rx_dma_memory_alloc FAILED\n"); 3139 et131x_adapter_memory_free(adapter); 3140 return status; 3141 } 3142 3143 status = et131x_init_recv(adapter); 3144 if (status) { 3145 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); 3146 et131x_adapter_memory_free(adapter); 3147 } 3148 return status; 3149 } 3150 3151 static void et131x_adjust_link(struct net_device *netdev) 3152 { 3153 struct et131x_adapter *adapter = netdev_priv(netdev); 3154 struct phy_device *phydev = netdev->phydev; 3155 3156 if (!phydev) 3157 return; 3158 if (phydev->link == adapter->link) 3159 return; 3160 3161 /* Check to see if we are in coma mode and if 3162 * so, disable it because we will not be 
	 * able
	 * to read PHY values until we are out.
	 */
	if (et1310_in_phy_coma(adapter))
		et1310_disable_phy_coma(adapter);

	adapter->link = phydev->link;
	phy_print_status(phydev);

	if (phydev->link) {
		adapter->boot_coma = 20;
		if (phydev->speed == SPEED_10) {
			/* NOTE(review): presumably a 10Base-T PHY errata
			 * workaround via the indexed MPHY registers; exact
			 * register semantics are not documented here.
			 */
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			/* Widen the PHY TX FIFO depth for jumbo frames at
			 * gigabit speed.
			 */
			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	} else {
		adapter->boot_coma = 0;

		if (phydev->speed == SPEED_10) {
			/* Same 10Base-T MPHY sequence as the link-up path
			 * above.
			 */
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et131x_free_busy_send_packets(adapter);
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during
		 * init prior to autonegotiation being complete. This
		 * way, when we get the auto-neg complete interrupt,
		 * we can complete init by calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		et131x_adapter_setup(adapter);

		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
	}
}

/* Find the first PHY on our MDIO bus, attach to it and restrict the
 * supported/advertised modes to what the chip can do.
 * Returns 0 on success or a negative errno.
 */
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_TP);

	/* Only the gigabit device variant supports 1000Base-T */
	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	phydev->autoneg = AUTONEG_ENABLE;

	phy_attached_info(phydev);

	return 0;
}

/* One-time initialisation of the per-device private data: takes a
 * reference on the PCI device, sets up locks and defaults.
 */
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->rcv_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	ether_addr_copy(adapter->addr, default_mac);

return adapter; 3299 } 3300 3301 static void et131x_pci_remove(struct pci_dev *pdev) 3302 { 3303 struct net_device *netdev = pci_get_drvdata(pdev); 3304 struct et131x_adapter *adapter = netdev_priv(netdev); 3305 3306 unregister_netdev(netdev); 3307 netif_napi_del(&adapter->napi); 3308 phy_disconnect(netdev->phydev); 3309 mdiobus_unregister(adapter->mii_bus); 3310 mdiobus_free(adapter->mii_bus); 3311 3312 et131x_adapter_memory_free(adapter); 3313 iounmap(adapter->regs); 3314 pci_dev_put(pdev); 3315 3316 free_netdev(netdev); 3317 pci_release_regions(pdev); 3318 pci_disable_device(pdev); 3319 } 3320 3321 static void et131x_up(struct net_device *netdev) 3322 { 3323 et131x_enable_txrx(netdev); 3324 phy_start(netdev->phydev); 3325 } 3326 3327 static void et131x_down(struct net_device *netdev) 3328 { 3329 /* Save the timestamp for the TX watchdog, prevent a timeout */ 3330 netif_trans_update(netdev); 3331 3332 phy_stop(netdev->phydev); 3333 et131x_disable_txrx(netdev); 3334 } 3335 3336 #ifdef CONFIG_PM_SLEEP 3337 static int et131x_suspend(struct device *dev) 3338 { 3339 struct pci_dev *pdev = to_pci_dev(dev); 3340 struct net_device *netdev = pci_get_drvdata(pdev); 3341 3342 if (netif_running(netdev)) { 3343 netif_device_detach(netdev); 3344 et131x_down(netdev); 3345 pci_save_state(pdev); 3346 } 3347 3348 return 0; 3349 } 3350 3351 static int et131x_resume(struct device *dev) 3352 { 3353 struct pci_dev *pdev = to_pci_dev(dev); 3354 struct net_device *netdev = pci_get_drvdata(pdev); 3355 3356 if (netif_running(netdev)) { 3357 pci_restore_state(pdev); 3358 et131x_up(netdev); 3359 netif_device_attach(netdev); 3360 } 3361 3362 return 0; 3363 } 3364 #endif 3365 3366 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); 3367 3368 static irqreturn_t et131x_isr(int irq, void *dev_id) 3369 { 3370 bool handled = true; 3371 bool enable_interrupts = true; 3372 struct net_device *netdev = dev_id; 3373 struct et131x_adapter *adapter = netdev_priv(netdev); 3374 struct 
	address_map __iomem *iomem = adapter->regs;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		enable_interrupts = false;
		goto out;
	}

	et131x_disable_interrupts(adapter);

	status = readl(&adapter->regs->global.int_status);

	/* Mask off the interrupt sources we do not service; the mask used
	 * depends on whether Tx flow control is enabled.
	 */
	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */
	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = tx_ring->send_head;

		/* A send that has been pending across two watchdog ticks is
		 * treated as a Tx completion to reap.
		 */
		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (rx_ring->unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	/* Defer Tx/Rx completion processing to the NAPI poll loop; interrupts
	 * stay disabled until the poll re-enables them.
	 */
	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
		enable_interrupts = false;
		napi_schedule(&adapter->napi);
	}

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);

	if (!status)
		goto out;

	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);

		dev_warn(&adapter->pdev->dev,
			 "TXDMA_ERR interrupt, error = %d\n",
			 txdma_err);
	}

	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* This indicates the number of unused buffers in RXDMA free
		 * buffer ring 0 is <= the limit you programmed. Free buffer
		 * resources need to be returned. Free buffers are consumed as
		 * packets are passed from the network to the host. The host
		 * becomes aware of the packets from the contents of the packet
		 * status ring. This ring is queried when the packet done
		 * interrupt occurs. Packets are then passed to the OS. When
		 * the OS is done with the packets the resources can be
		 * returned to the ET1310 for re-use. This interrupt is one
		 * method of returning resources.
		 */

		/* If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
			u32 pm_csr;

			/* Tell the device to send a pause packet via the back
			 * pressure register (bp req and bp xon/xoff)
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (!et1310_in_phy_coma(adapter))
				writel(3, &iomem->txmac.bp_ctrl);
		}
	}

	/* Handle Packet Status Ring Low Interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two Free Buffer Rings. Packets going
		 * from the network to the host each consume a free buffer
		 * resource and a packet status resource. These resources are
		 * passed to the OS. When the OS is done with the resources,
		 * they need to be returned to the ET1310. This is one method
		 * of returning the resources.
		 */
	}

	if (status & ET_INTR_RXDMA_ERR) {
		/* The rxdma_error interrupt is sent when a time-out on a
		 * request issued by the JAGCore has occurred or a completion is
		 * returned with an un-successful status. In both cases the
		 * request is considered complete. The JAGCore will
		 * automatically re-try the request in question. Normally
		 * information on events like these are sent to the host using
		 * the "Advanced Error Reporting" capability. This interrupt is
		 * another way of getting similar information. The only thing
		 * required is to clear the interrupt by reading the ISR in the
		 * global resources. The JAGCore will do a re-try on the
		 * request. Normally you should never see this interrupt. If
		 * you start to see this interrupt occurring frequently then
		 * something bad has occurred. A reset might be the thing to do.
		 */
		/* TRAP();*/

		dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
			 readl(&iomem->txmac.tx_test));
	}

	/* Handle the Wake on LAN Event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN. The driver
		 * should never see this, if it does, something serious is
		 * wrong.
		 */
		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
	}

	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* When any of the errors occur and TXMAC generates an
		 * interrupt to report these errors, it usually means that
		 * TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx Q. All of these errors are catastrophic
		 * and TXMAC won't be able to recover data when these errors
		 * occur. In a nutshell, the whole Tx path will have to be reset
		 * and re-configured afterwards.
		 */
		dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
			 err);

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_RXMAC) {
		/* These interrupts are catastrophic to the device, what we need
		 * to do is disable the interrupts and set the flag to cause us
		 * to reset so we can solve this issue.
		 */
		dev_warn(&adapter->pdev->dev,
			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
			 readl(&iomem->rxmac.err_reg));

		dev_warn(&adapter->pdev->dev,
			 "Enable 0x%08x, Diag 0x%08x\n",
			 readl(&iomem->rxmac.ctrl),
			 readl(&iomem->rxmac.rxq_diag));

		/* If we are debugging, we want to see this error, otherwise we
		 * just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_MAC_STAT) {
		/* This means at least one of the un-masked counters in the
		 * MAC_STAT block has rolled over. Use this to maintain the top,
		 * software managed bits of the counter(s).
		 */
		et1310_handle_macstat_interrupt(adapter);
	}

	if (status & ET_INTR_SLV_TIMEOUT) {
		/* This means a timeout has occurred on a read or write request
		 * to one of the JAGCore registers. The Global Resources block
		 * has terminated the request and on a read request, returned a
		 * "fake" value. The most likely reasons are: Bad Address or the
		 * addressed module is in a power-down state and can't respond.
		 */
	}

out:
	if (enable_interrupts)
		et131x_enable_interrupts(adapter);

	return IRQ_RETVAL(handled);
}

/* NAPI poll: process up to @budget received packets, reap completed
 * transmits, and re-enable interrupts once all work is done.
 */
static int et131x_poll(struct napi_struct *napi, int budget)
{
	struct et131x_adapter *adapter =
		container_of(napi, struct et131x_adapter, napi);
	int work_done = et131x_handle_recv_pkts(adapter, budget);

	et131x_handle_send_pkts(adapter);

	if (work_done < budget) {
		napi_complete(&adapter->napi);
		et131x_enable_interrupts(adapter);
	}

	return work_done;
}

/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;
	stats->rx_dropped = devstat->rcvd_pkts_dropped;

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors = devstat->; */
	/* stats->rx_fifo_errors = devstat->; */
	/* stats->rx_missed_errors = devstat->; */

	/* stats->tx_aborted_errors = devstat->; */
	/* stats->tx_carrier_errors = devstat->; */
	/* stats->tx_fifo_errors = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors = devstat->; */
	return stats;
}

/* ndo_open: arm the error timer, install the (shared) IRQ handler,
 * enable NAPI and bring the link up.
 */
static int et131x_open(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev); 3620 struct pci_dev *pdev = adapter->pdev; 3621 unsigned int irq = pdev->irq; 3622 int result; 3623 3624 /* Start the timer to track NIC errors */ 3625 init_timer(&adapter->error_timer); 3626 adapter->error_timer.expires = jiffies + 3627 msecs_to_jiffies(TX_ERROR_PERIOD); 3628 adapter->error_timer.function = et131x_error_timer_handler; 3629 adapter->error_timer.data = (unsigned long)adapter; 3630 add_timer(&adapter->error_timer); 3631 3632 result = request_irq(irq, et131x_isr, 3633 IRQF_SHARED, netdev->name, netdev); 3634 if (result) { 3635 dev_err(&pdev->dev, "could not register IRQ %d\n", irq); 3636 return result; 3637 } 3638 3639 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; 3640 3641 napi_enable(&adapter->napi); 3642 3643 et131x_up(netdev); 3644 3645 return result; 3646 } 3647 3648 static int et131x_close(struct net_device *netdev) 3649 { 3650 struct et131x_adapter *adapter = netdev_priv(netdev); 3651 3652 et131x_down(netdev); 3653 napi_disable(&adapter->napi); 3654 3655 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; 3656 free_irq(adapter->pdev->irq, netdev); 3657 3658 /* Stop the error timer */ 3659 return del_timer_sync(&adapter->error_timer); 3660 } 3661 3662 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, 3663 int cmd) 3664 { 3665 if (!netdev->phydev) 3666 return -EINVAL; 3667 3668 return phy_mii_ioctl(netdev->phydev, reqbuf, cmd); 3669 } 3670 3671 /* et131x_set_packet_filter - Configures the Rx Packet filtering */ 3672 static int et131x_set_packet_filter(struct et131x_adapter *adapter) 3673 { 3674 int filter = adapter->packet_filter; 3675 u32 ctrl; 3676 u32 pf_ctrl; 3677 3678 ctrl = readl(&adapter->regs->rxmac.ctrl); 3679 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 3680 3681 /* Default to disabled packet filtering */ 3682 ctrl |= 0x04; 3683 3684 /* Set us to be in promiscuous mode so we receive everything, this 3685 * is also true when we get a packet filter of 0 3686 */ 
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7;	/* Clear filter bits */
	else {
		/* Set us up with Multicast packet filtering. Three cases are
		 * possible - (1) we have a multi-cast list, (2) we receive ALL
		 * multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2;	/* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1;	/* Broadcast filter bit */
			ctrl &= ~0x04;
		} else {
			pf_ctrl &= ~1;
		}

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return 0;
}

/* ndo_set_rx_mode: translate the netdev flags and multicast list into
 * our packet-filter flags and program the hardware if they changed.
 */
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	struct netdev_hw_addr *ha;
	int i;

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else {
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
	}

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		ether_addr_copy(adapter->multicast_list[i++], ha->addr);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter)
		et131x_set_packet_filter(adapter);
}

/* ndo_start_xmit: queue one skb for transmission; the skb is consumed in
 * all cases (dropped packets are freed and counted).
 */
static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* stop the queue if it's getting full */
	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netif_trans_update(netdev);

	/* TCB is not available */
	if (tx_ring->used >= NUM_TCB)
		goto drop_err;

	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
	    !netif_carrier_ok(netdev))
		goto drop_err;

	if (send_packet(skb, adapter))
		goto drop_err;

	return NETDEV_TX_OK;

drop_err:
	/* Drop path: free the skb and count it, but still report TX_OK so
	 * the core does not requeue the same skb.
	 */
	dev_kfree_skb_any(skb);
	adapter->netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* et131x_tx_timeout - Timeout handler
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'tx_timeo" element in the net_device structure (see
 * et131x_alloc_device() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure?
	 */
	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
	tcb = tx_ring->send_head;
	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	if (tcb) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			dev_warn(&adapter->pdev->dev,
				 "Send stuck - reset. tcb->WrIndex %x\n",
				 tcb->index);

			adapter->netdev->stats.tx_errors++;

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}
	}
}

/* ndo_change_mtu: accept 64-9216 bytes; requires a full soft reset and
 * re-allocation of the DMA rings for the new packet size.
 */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);

	netdev->mtu = new_mtu;

	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);
	et131x_hwaddr_init(adapter);
	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);
	et131x_enable_txrx(netdev);

	return result;
}

static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open = et131x_open,
	.ndo_stop = et131x_close,
	.ndo_start_xmit = et131x_tx,
	.ndo_set_rx_mode = et131x_multicast,
	.ndo_tx_timeout = et131x_tx_timeout,
	.ndo_change_mtu = et131x_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_get_stats = et131x_stats,
	.ndo_do_ioctl = et131x_ioctl,
};

/* et131x_pci_setup - PCI probe: enable and map the device, allocate the
 * netdev and DMA resources, attach the PHY and register the interface.
 * Returns 0 on success or a negative errno (all resources unwound).
 */
static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}

	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;

	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy
	COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);

	ether_addr_copy(netdev->dev_addr, adapter->addr);

	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* We can enable interrupts now
	 *
	 * NOTE - Because registration of interrupt handler is done in the
	 * device's open(), defer enabling device interrupts to that
	 * point
	 */

	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

	/* Error unwind: each label releases resources acquired before the
	 * failure point, in reverse order of acquisition.
	 */
err_phy_disconnect:
	phy_disconnect(netdev->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}

static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name = DRIVER_NAME,
	.id_table = et131x_pci_table,
	.probe = et131x_pci_setup,
	.remove = et131x_pci_remove,
	.driver.pm = &et131x_pm_ops,
};

module_pci_driver(et131x_driver);