1 /* 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 4 * 5 * Based on the 64360 driver from: 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> 7 * Rabeeh Khoury <rabeeh@marvell.com> 8 * 9 * Copyright (C) 2003 PMC-Sierra, Inc., 10 * written by Manish Lachwani 11 * 12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 13 * 14 * Copyright (C) 2004-2006 MontaVista Software, Inc. 15 * Dale Farnsworth <dale@farnsworth.org> 16 * 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 18 * <sjhill@realitydiluted.com> 19 * 20 * Copyright (C) 2007-2008 Marvell Semiconductor 21 * Lennert Buytenhek <buytenh@marvell.com> 22 * 23 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de> 24 * 25 * This program is free software; you can redistribute it and/or 26 * modify it under the terms of the GNU General Public License 27 * as published by the Free Software Foundation; either version 2 28 * of the License, or (at your option) any later version. 29 * 30 * This program is distributed in the hope that it will be useful, 31 * but WITHOUT ANY WARRANTY; without even the implied warranty of 32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 33 * GNU General Public License for more details. 34 * 35 * You should have received a copy of the GNU General Public License 36 * along with this program; if not, see <http://www.gnu.org/licenses/>. 37 */ 38 39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 40 41 #include <linux/init.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/in.h> 44 #include <linux/ip.h> 45 #include <net/tso.h> 46 #include <linux/tcp.h> 47 #include <linux/udp.h> 48 #include <linux/etherdevice.h> 49 #include <linux/delay.h> 50 #include <linux/ethtool.h> 51 #include <linux/platform_device.h> 52 #include <linux/module.h> 53 #include <linux/kernel.h> 54 #include <linux/spinlock.h> 55 #include <linux/workqueue.h> 56 #include <linux/phy.h> 57 #include <linux/mv643xx_eth.h> 58 #include <linux/io.h> 59 #include <linux/interrupt.h> 60 #include <linux/types.h> 61 #include <linux/slab.h> 62 #include <linux/clk.h> 63 #include <linux/of.h> 64 #include <linux/of_irq.h> 65 #include <linux/of_net.h> 66 #include <linux/of_mdio.h> 67 68 static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 69 static char mv643xx_eth_driver_version[] = "1.4"; 70 71 72 /* 73 * Registers shared between all ports. 74 */ 75 #define PHY_ADDR 0x0000 76 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 77 #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 78 #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 79 #define WINDOW_BAR_ENABLE 0x0290 80 #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) 81 82 /* 83 * Main per-port registers. These live at offset 0x0400 for 84 * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 85 */ 86 #define PORT_CONFIG 0x0000 87 #define UNICAST_PROMISCUOUS_MODE 0x00000001 88 #define PORT_CONFIG_EXT 0x0004 89 #define MAC_ADDR_LOW 0x0014 90 #define MAC_ADDR_HIGH 0x0018 91 #define SDMA_CONFIG 0x001c 92 #define TX_BURST_SIZE_16_64BIT 0x01000000 93 #define TX_BURST_SIZE_4_64BIT 0x00800000 94 #define BLM_TX_NO_SWAP 0x00000020 95 #define BLM_RX_NO_SWAP 0x00000010 96 #define RX_BURST_SIZE_16_64BIT 0x00000008 97 #define RX_BURST_SIZE_4_64BIT 0x00000004 98 #define PORT_SERIAL_CONTROL 0x003c 99 #define SET_MII_SPEED_TO_100 0x01000000 100 #define SET_GMII_SPEED_TO_1000 0x00800000 101 #define SET_FULL_DUPLEX_MODE 0x00200000 102 #define MAX_RX_PACKET_9700BYTE 0x000a0000 103 #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 104 #define DO_NOT_FORCE_LINK_FAIL 0x00000400 105 #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 106 #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 107 #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 108 #define FORCE_LINK_PASS 0x00000002 109 #define SERIAL_PORT_ENABLE 0x00000001 110 #define PORT_STATUS 0x0044 111 #define TX_FIFO_EMPTY 0x00000400 112 #define TX_IN_PROGRESS 0x00000080 113 #define PORT_SPEED_MASK 0x00000030 114 #define PORT_SPEED_1000 0x00000010 115 #define PORT_SPEED_100 0x00000020 116 #define PORT_SPEED_10 0x00000000 117 #define FLOW_CONTROL_ENABLED 0x00000008 118 #define FULL_DUPLEX 0x00000004 119 #define LINK_UP 0x00000002 120 #define TXQ_COMMAND 0x0048 121 #define TXQ_FIX_PRIO_CONF 0x004c 122 #define PORT_SERIAL_CONTROL1 0x004c 123 #define CLK125_BYPASS_EN 0x00000010 124 #define TX_BW_RATE 0x0050 125 #define TX_BW_MTU 0x0058 126 #define TX_BW_BURST 0x005c 127 #define INT_CAUSE 0x0060 128 #define INT_TX_END 0x07f80000 129 #define INT_TX_END_0 0x00080000 130 #define INT_RX 0x000003fc 131 #define INT_RX_0 0x00000004 132 #define INT_EXT 0x00000002 133 #define INT_CAUSE_EXT 0x0064 134 #define INT_EXT_LINK_PHY 0x00110000 135 #define INT_EXT_TX 0x000000ff 136 #define INT_MASK 0x0068 137 #define INT_MASK_EXT 0x006c 138 #define TX_FIFO_URGENT_THRESHOLD 0x0074 139 #define RX_DISCARD_FRAME_CNT 0x0084 140 #define RX_OVERRUN_FRAME_CNT 0x0088 141 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 142 #define TX_BW_RATE_MOVED 0x00e0 143 #define TX_BW_MTU_MOVED 0x00e8 144 #define TX_BW_BURST_MOVED 0x00ec 145 #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) 146 #define RXQ_COMMAND 0x0280 147 #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) 148 #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) 149 #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) 150 #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) 151 152 /* 153 * Misc per-port registers. 154 */ 155 #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) 156 #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) 157 #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) 158 #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) 159 160 161 /* 162 * SDMA configuration register default value. 163 */ 164 #if defined(__BIG_ENDIAN) 165 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 166 (RX_BURST_SIZE_4_64BIT | \ 167 TX_BURST_SIZE_4_64BIT) 168 #elif defined(__LITTLE_ENDIAN) 169 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 170 (RX_BURST_SIZE_4_64BIT | \ 171 BLM_RX_NO_SWAP | \ 172 BLM_TX_NO_SWAP | \ 173 TX_BURST_SIZE_4_64BIT) 174 #else 175 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 176 #endif 177 178 179 /* 180 * Misc definitions. 181 */ 182 #define DEFAULT_RX_QUEUE_SIZE 128 183 #define DEFAULT_TX_QUEUE_SIZE 512 184 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 185 186 /* Max number of allowed TCP segments for software TSO */ 187 #define MV643XX_MAX_TSO_SEGS 100 188 #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 189 190 #define IS_TSO_HEADER(txq, addr) \ 191 ((addr >= txq->tso_hdrs_dma) && \ 192 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) 193 194 #define DESC_DMA_MAP_SINGLE 0 195 #define DESC_DMA_MAP_PAGE 1 196 197 /* 198 * RX/TX descriptors. 199 */ 200 #if defined(__BIG_ENDIAN) 201 struct rx_desc { 202 u16 byte_cnt; /* Descriptor buffer byte count */ 203 u16 buf_size; /* Buffer size */ 204 u32 cmd_sts; /* Descriptor command status */ 205 u32 next_desc_ptr; /* Next descriptor pointer */ 206 u32 buf_ptr; /* Descriptor buffer pointer */ 207 }; 208 209 struct tx_desc { 210 u16 byte_cnt; /* buffer byte count */ 211 u16 l4i_chk; /* CPU provided TCP checksum */ 212 u32 cmd_sts; /* Command/status field */ 213 u32 next_desc_ptr; /* Pointer to next descriptor */ 214 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 215 }; 216 #elif defined(__LITTLE_ENDIAN) 217 struct rx_desc { 218 u32 cmd_sts; /* Descriptor command status */ 219 u16 buf_size; /* Buffer size */ 220 u16 byte_cnt; /* Descriptor buffer byte count */ 221 u32 buf_ptr; /* Descriptor buffer pointer */ 222 u32 next_desc_ptr; /* Next descriptor pointer */ 223 }; 224 225 struct tx_desc { 226 u32 cmd_sts; /* Command/status field */ 227 u16 l4i_chk; /* CPU provided TCP checksum */ 228 u16 byte_cnt; /* buffer byte count */ 229 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 230 u32 next_desc_ptr; /* Pointer to next descriptor */ 231 }; 232 #else 233 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 234 #endif 235 236 /* RX & TX descriptor command */ 237 #define BUFFER_OWNED_BY_DMA 0x80000000 238 239 /* RX & TX descriptor status */ 240 #define ERROR_SUMMARY 0x00000001 241 242 /* RX descriptor status */ 243 #define LAYER_4_CHECKSUM_OK 0x40000000 244 #define RX_ENABLE_INTERRUPT 0x20000000 245 #define RX_FIRST_DESC 0x08000000 246 #define RX_LAST_DESC 0x04000000 247 #define RX_IP_HDR_OK 0x02000000 248 #define RX_PKT_IS_IPV4 0x01000000 249 #define RX_PKT_IS_ETHERNETV2 0x00800000 250 #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 251 #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 252 #define RX_PKT_IS_VLAN_TAGGED 0x00080000 253 254 /* TX descriptor command */ 255 #define TX_ENABLE_INTERRUPT 0x00800000 256 #define GEN_CRC 0x00400000 257 #define TX_FIRST_DESC 0x00200000 258 #define TX_LAST_DESC 0x00100000 259 #define ZERO_PADDING 0x00080000 260 #define GEN_IP_V4_CHECKSUM 0x00040000 261 #define GEN_TCP_UDP_CHECKSUM 0x00020000 262 #define UDP_FRAME 0x00010000 263 #define MAC_HDR_EXTRA_4_BYTES 0x00008000 264 #define GEN_TCP_UDP_CHK_FULL 0x00000400 265 #define MAC_HDR_EXTRA_8_BYTES 0x00000200 266 267 #define TX_IHL_SHIFT 11 268 269 270 /* global *******************************************************************/ 271 struct mv643xx_eth_shared_private { 272 /* 273 * Ethernet controller base address. 274 */ 275 void __iomem *base; 276 277 /* 278 * Per-port MBUS window access register value. 279 */ 280 u32 win_protect; 281 282 /* 283 * Hardware-specific parameters. 284 */ 285 int extended_rx_coal_limit; 286 int tx_bw_control; 287 int tx_csum_limit; 288 struct clk *clk; 289 }; 290 291 #define TX_BW_CONTROL_ABSENT 0 292 #define TX_BW_CONTROL_OLD_LAYOUT 1 293 #define TX_BW_CONTROL_NEW_LAYOUT 2 294 295 static int mv643xx_eth_open(struct net_device *dev); 296 static int mv643xx_eth_stop(struct net_device *dev); 297 298 299 /* per-port *****************************************************************/ 300 struct mib_counters { 301 u64 good_octets_received; 302 u32 bad_octets_received; 303 u32 internal_mac_transmit_err; 304 u32 good_frames_received; 305 u32 bad_frames_received; 306 u32 broadcast_frames_received; 307 u32 multicast_frames_received; 308 u32 frames_64_octets; 309 u32 frames_65_to_127_octets; 310 u32 frames_128_to_255_octets; 311 u32 frames_256_to_511_octets; 312 u32 frames_512_to_1023_octets; 313 u32 frames_1024_to_max_octets; 314 u64 good_octets_sent; 315 u32 good_frames_sent; 316 u32 excessive_collision; 317 u32 multicast_frames_sent; 318 u32 broadcast_frames_sent; 319 u32 unrec_mac_control_received; 320 u32 fc_sent; 321 u32 good_fc_received; 322 u32 bad_fc_received; 323 u32 undersize_received; 324 u32 fragments_received; 325 u32 oversize_received; 326 u32 jabber_received; 327 u32 mac_receive_error; 328 u32 bad_crc_event; 329 u32 collision; 330 u32 late_collision; 331 /* Non MIB hardware counters */ 332 u32 rx_discard; 333 u32 rx_overrun; 334 }; 335 336 struct rx_queue { 337 int index; 338 339 int rx_ring_size; 340 341 int rx_desc_count; 342 int rx_curr_desc; 343 int rx_used_desc; 344 345 struct rx_desc *rx_desc_area; 346 dma_addr_t rx_desc_dma; 347 int rx_desc_area_size; 348 struct sk_buff **rx_skb; 349 }; 350 351 struct tx_queue { 352 int index; 353 354 int tx_ring_size; 355 356 int tx_desc_count; 357 int tx_curr_desc; 358 int tx_used_desc; 359 360 int tx_stop_threshold; 361 int tx_wake_threshold; 362 363 char *tso_hdrs; 364 dma_addr_t tso_hdrs_dma; 365 366 struct tx_desc *tx_desc_area; 367 char *tx_desc_mapping; /* array to track the type of the dma mapping */ 368 dma_addr_t tx_desc_dma; 369 int tx_desc_area_size; 370 371 struct sk_buff_head tx_skb; 372 373 unsigned long tx_packets; 374 unsigned long tx_bytes; 375 unsigned long tx_dropped; 376 }; 377 378 struct mv643xx_eth_private { 379 struct mv643xx_eth_shared_private *shared; 380 void __iomem *base; 381 int port_num; 382 383 struct net_device *dev; 384 385 struct timer_list mib_counters_timer; 386 spinlock_t mib_counters_lock; 387 struct mib_counters mib_counters; 388 389 struct work_struct tx_timeout_task; 390 391 struct napi_struct napi; 392 u32 int_mask; 393 u8 oom; 394 u8 work_link; 395 u8 work_tx; 396 u8 work_tx_end; 397 u8 work_rx; 398 u8 work_rx_refill; 399 400 int skb_size; 401 402 /* 403 * RX state. 404 */ 405 int rx_ring_size; 406 unsigned long rx_desc_sram_addr; 407 int rx_desc_sram_size; 408 int rxq_count; 409 struct timer_list rx_oom; 410 struct rx_queue rxq[8]; 411 412 /* 413 * TX state. 414 */ 415 int tx_ring_size; 416 unsigned long tx_desc_sram_addr; 417 int tx_desc_sram_size; 418 int txq_count; 419 struct tx_queue txq[8]; 420 421 /* 422 * Hardware-specific parameters. 423 */ 424 struct clk *clk; 425 unsigned int t_clk; 426 }; 427 428 429 /* port register accessors **************************************************/ 430 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) 431 { 432 return readl(mp->shared->base + offset); 433 } 434 435 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) 436 { 437 return readl(mp->base + offset); 438 } 439 440 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) 441 { 442 writel(data, mp->shared->base + offset); 443 } 444 445 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) 446 { 447 writel(data, mp->base + offset); 448 } 449 450 451 /* rxq/txq helper functions *************************************************/ 452 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) 453 { 454 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); 455 } 456 457 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) 458 { 459 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); 460 } 461 462 static void rxq_enable(struct rx_queue *rxq) 463 { 464 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 465 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); 466 } 467 468 static void rxq_disable(struct rx_queue *rxq) 469 { 470 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 471 u8 mask = 1 << rxq->index; 472 473 wrlp(mp, RXQ_COMMAND, mask << 8); 474 while (rdlp(mp, RXQ_COMMAND) & mask) 475 udelay(10); 476 } 477 478 static void txq_reset_hw_ptr(struct tx_queue *txq) 479 { 480 struct mv643xx_eth_private *mp = txq_to_mp(txq); 481 u32 addr; 482 483 addr = (u32)txq->tx_desc_dma; 484 addr += txq->tx_curr_desc * sizeof(struct tx_desc); 485 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); 486 } 487 488 static void txq_enable(struct tx_queue *txq) 489 { 490 struct mv643xx_eth_private *mp = txq_to_mp(txq); 491 wrlp(mp, TXQ_COMMAND, 1 << txq->index); 492 } 493 494 static void txq_disable(struct tx_queue *txq) 495 { 496 struct mv643xx_eth_private *mp = txq_to_mp(txq); 497 u8 mask = 1 << txq->index; 498 499 wrlp(mp, TXQ_COMMAND, mask << 8); 500 while (rdlp(mp, TXQ_COMMAND) & mask) 501 udelay(10); 502 } 503 504 static void txq_maybe_wake(struct tx_queue *txq) 505 { 506 struct mv643xx_eth_private *mp = txq_to_mp(txq); 507 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 508 509 if (netif_tx_queue_stopped(nq)) { 510 __netif_tx_lock(nq, smp_processor_id()); 511 if (txq->tx_desc_count <= txq->tx_wake_threshold) 512 netif_tx_wake_queue(nq); 513 __netif_tx_unlock(nq); 514 } 515 } 516 517 static int rxq_process(struct rx_queue *rxq, int budget) 518 { 519 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 520 struct net_device_stats *stats = &mp->dev->stats; 521 int rx; 522 523 rx = 0; 524 while (rx < budget && rxq->rx_desc_count) { 525 struct rx_desc *rx_desc; 526 unsigned int cmd_sts; 527 struct sk_buff *skb; 528 u16 byte_cnt; 529 530 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 531 532 cmd_sts = rx_desc->cmd_sts; 533 if (cmd_sts & BUFFER_OWNED_BY_DMA) 534 break; 535 rmb(); 536 537 skb = rxq->rx_skb[rxq->rx_curr_desc]; 538 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 539 540 rxq->rx_curr_desc++; 541 if (rxq->rx_curr_desc == rxq->rx_ring_size) 542 rxq->rx_curr_desc = 0; 543 544 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, 545 rx_desc->buf_size, DMA_FROM_DEVICE); 546 rxq->rx_desc_count--; 547 rx++; 548 549 mp->work_rx_refill |= 1 << rxq->index; 550 551 byte_cnt = rx_desc->byte_cnt; 552 553 /* 554 * Update statistics. 555 * 556 * Note that the descriptor byte count includes 2 dummy 557 * bytes automatically inserted by the hardware at the 558 * start of the packet (which we don't count), and a 4 559 * byte CRC at the end of the packet (which we do count). 560 */ 561 stats->rx_packets++; 562 stats->rx_bytes += byte_cnt - 2; 563 564 /* 565 * In case we received a packet without first / last bits 566 * on, or the error summary bit is set, the packet needs 567 * to be dropped. 568 */ 569 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) 570 != (RX_FIRST_DESC | RX_LAST_DESC)) 571 goto err; 572 573 /* 574 * The -4 is for the CRC in the trailer of the 575 * received packet 576 */ 577 skb_put(skb, byte_cnt - 2 - 4); 578 579 if (cmd_sts & LAYER_4_CHECKSUM_OK) 580 skb->ip_summed = CHECKSUM_UNNECESSARY; 581 skb->protocol = eth_type_trans(skb, mp->dev); 582 583 napi_gro_receive(&mp->napi, skb); 584 585 continue; 586 587 err: 588 stats->rx_dropped++; 589 590 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 591 (RX_FIRST_DESC | RX_LAST_DESC)) { 592 if (net_ratelimit()) 593 netdev_err(mp->dev, 594 "received packet spanning multiple descriptors\n"); 595 } 596 597 if (cmd_sts & ERROR_SUMMARY) 598 stats->rx_errors++; 599 600 dev_kfree_skb(skb); 601 } 602 603 if (rx < budget) 604 mp->work_rx &= ~(1 << rxq->index); 605 606 return rx; 607 } 608 609 static int rxq_refill(struct rx_queue *rxq, int budget) 610 { 611 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 612 int refilled; 613 614 refilled = 0; 615 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { 616 struct sk_buff *skb; 617 int rx; 618 struct rx_desc *rx_desc; 619 int size; 620 621 skb = netdev_alloc_skb(mp->dev, mp->skb_size); 622 623 if (skb == NULL) { 624 mp->oom = 1; 625 goto oom; 626 } 627 628 if (SKB_DMA_REALIGN) 629 skb_reserve(skb, SKB_DMA_REALIGN); 630 631 refilled++; 632 rxq->rx_desc_count++; 633 634 rx = rxq->rx_used_desc++; 635 if (rxq->rx_used_desc == rxq->rx_ring_size) 636 rxq->rx_used_desc = 0; 637 638 rx_desc = rxq->rx_desc_area + rx; 639 640 size = skb_end_pointer(skb) - skb->data; 641 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 642 skb->data, size, 643 DMA_FROM_DEVICE); 644 rx_desc->buf_size = size; 645 rxq->rx_skb[rx] = skb; 646 wmb(); 647 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; 648 wmb(); 649 650 /* 651 * The hardware automatically prepends 2 bytes of 652 * dummy data to each received packet, so that the 653 * IP header ends up 16-byte aligned. 654 */ 655 skb_reserve(skb, 2); 656 } 657 658 if (refilled < budget) 659 mp->work_rx_refill &= ~(1 << rxq->index); 660 661 oom: 662 return refilled; 663 } 664 665 666 /* tx ***********************************************************************/ 667 static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) 668 { 669 int frag; 670 671 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 672 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 673 674 if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) 675 return 1; 676 } 677 678 return 0; 679 } 680 681 static inline __be16 sum16_as_be(__sum16 sum) 682 { 683 return (__force __be16)sum; 684 } 685 686 static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, 687 u16 *l4i_chk, u32 *command, int length) 688 { 689 int ret; 690 u32 cmd = 0; 691 692 if (skb->ip_summed == CHECKSUM_PARTIAL) { 693 int hdr_len; 694 int tag_bytes; 695 696 BUG_ON(skb->protocol != htons(ETH_P_IP) && 697 skb->protocol != htons(ETH_P_8021Q)); 698 699 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; 700 tag_bytes = hdr_len - ETH_HLEN; 701 702 if (length - hdr_len > mp->shared->tx_csum_limit || 703 unlikely(tag_bytes & ~12)) { 704 ret = skb_checksum_help(skb); 705 if (!ret) 706 goto no_csum; 707 return ret; 708 } 709 710 if (tag_bytes & 4) 711 cmd |= MAC_HDR_EXTRA_4_BYTES; 712 if (tag_bytes & 8) 713 cmd |= MAC_HDR_EXTRA_8_BYTES; 714 715 cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL | 716 GEN_IP_V4_CHECKSUM | 717 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 718 719 /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL 720 * it seems we don't need to pass the initial checksum. */ 721 switch (ip_hdr(skb)->protocol) { 722 case IPPROTO_UDP: 723 cmd |= UDP_FRAME; 724 *l4i_chk = 0; 725 break; 726 case IPPROTO_TCP: 727 *l4i_chk = 0; 728 break; 729 default: 730 WARN(1, "protocol not supported"); 731 } 732 } else { 733 no_csum: 734 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 735 cmd |= 5 << TX_IHL_SHIFT; 736 } 737 *command = cmd; 738 return 0; 739 } 740 741 static inline int 742 txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, 743 struct sk_buff *skb, char *data, int length, 744 bool last_tcp, bool is_last) 745 { 746 int tx_index; 747 u32 cmd_sts; 748 struct tx_desc *desc; 749 750 tx_index = txq->tx_curr_desc++; 751 if (txq->tx_curr_desc == txq->tx_ring_size) 752 txq->tx_curr_desc = 0; 753 desc = &txq->tx_desc_area[tx_index]; 754 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; 755 756 desc->l4i_chk = 0; 757 desc->byte_cnt = length; 758 759 if (length <= 8 && (uintptr_t)data & 0x7) { 760 /* Copy unaligned small data fragment to TSO header data area */ 761 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, 762 data, length); 763 desc->buf_ptr = txq->tso_hdrs_dma 764 + tx_index * TSO_HEADER_SIZE; 765 } else { 766 /* Alignment is okay, map buffer and hand off to hardware */ 767 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; 768 desc->buf_ptr = dma_map_single(dev->dev.parent, data, 769 length, DMA_TO_DEVICE); 770 if (unlikely(dma_mapping_error(dev->dev.parent, 771 desc->buf_ptr))) { 772 WARN(1, "dma_map_single failed!\n"); 773 return -ENOMEM; 774 } 775 } 776 777 cmd_sts = BUFFER_OWNED_BY_DMA; 778 if (last_tcp) { 779 /* last descriptor in the TCP packet */ 780 cmd_sts |= ZERO_PADDING | TX_LAST_DESC; 781 /* last descriptor in SKB */ 782 if (is_last) 783 cmd_sts |= TX_ENABLE_INTERRUPT; 784 } 785 desc->cmd_sts = cmd_sts; 786 return 0; 787 } 788 789 static inline void 790 txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, 791 u32 *first_cmd_sts, bool first_desc) 792 { 793 struct mv643xx_eth_private *mp = txq_to_mp(txq); 794 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 795 int tx_index; 796 struct tx_desc *desc; 797 int ret; 798 u32 cmd_csum = 0; 799 u16 l4i_chk = 0; 800 u32 cmd_sts; 801 802 tx_index = txq->tx_curr_desc; 803 desc = &txq->tx_desc_area[tx_index]; 804 805 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); 806 if (ret) 807 WARN(1, "failed to prepare checksum!"); 808 809 /* Should we set this? Can't use the value from skb_tx_csum() 810 * as it's not the correct initial L4 checksum to use. */ 811 desc->l4i_chk = 0; 812 813 desc->byte_cnt = hdr_len; 814 desc->buf_ptr = txq->tso_hdrs_dma + 815 txq->tx_curr_desc * TSO_HEADER_SIZE; 816 cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | 817 GEN_CRC; 818 819 /* Defer updating the first command descriptor until all 820 * following descriptors have been written. 821 */ 822 if (first_desc) 823 *first_cmd_sts = cmd_sts; 824 else 825 desc->cmd_sts = cmd_sts; 826 827 txq->tx_curr_desc++; 828 if (txq->tx_curr_desc == txq->tx_ring_size) 829 txq->tx_curr_desc = 0; 830 } 831 832 static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, 833 struct net_device *dev) 834 { 835 struct mv643xx_eth_private *mp = txq_to_mp(txq); 836 int total_len, data_left, ret; 837 int desc_count = 0; 838 struct tso_t tso; 839 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 840 struct tx_desc *first_tx_desc; 841 u32 first_cmd_sts = 0; 842 843 /* Count needed descriptors */ 844 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { 845 netdev_dbg(dev, "not enough descriptors for TSO!\n"); 846 return -EBUSY; 847 } 848 849 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; 850 851 /* Initialize the TSO handler, and prepare the first payload */ 852 tso_start(skb, &tso); 853 854 total_len = skb->len - hdr_len; 855 while (total_len > 0) { 856 bool first_desc = (desc_count == 0); 857 char *hdr; 858 859 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 860 total_len -= data_left; 861 desc_count++; 862 863 /* prepare packet headers: MAC + IP + TCP */ 864 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; 865 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 866 txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, 867 first_desc); 868 869 while (data_left > 0) { 870 int size; 871 desc_count++; 872 873 size = min_t(int, tso.size, data_left); 874 ret = txq_put_data_tso(dev, txq, skb, tso.data, size, 875 size == data_left, 876 total_len == 0); 877 if (ret) 878 goto err_release; 879 data_left -= size; 880 tso_build_data(skb, &tso, size); 881 } 882 } 883 884 __skb_queue_tail(&txq->tx_skb, skb); 885 skb_tx_timestamp(skb); 886 887 /* ensure all other descriptors are written before first cmd_sts */ 888 wmb(); 889 first_tx_desc->cmd_sts = first_cmd_sts; 890 891 /* clear TX_END status */ 892 mp->work_tx_end &= ~(1 << txq->index); 893 894 /* ensure all descriptors are written before poking hardware */ 895 wmb(); 896 txq_enable(txq); 897 txq->tx_desc_count += desc_count; 898 return 0; 899 err_release: 900 /* TODO: Release all used data descriptors; header descriptors must not 901 * be DMA-unmapped. 902 */ 903 return ret; 904 } 905 906 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 907 { 908 struct mv643xx_eth_private *mp = txq_to_mp(txq); 909 int nr_frags = skb_shinfo(skb)->nr_frags; 910 int frag; 911 912 for (frag = 0; frag < nr_frags; frag++) { 913 skb_frag_t *this_frag; 914 int tx_index; 915 struct tx_desc *desc; 916 917 this_frag = &skb_shinfo(skb)->frags[frag]; 918 tx_index = txq->tx_curr_desc++; 919 if (txq->tx_curr_desc == txq->tx_ring_size) 920 txq->tx_curr_desc = 0; 921 desc = &txq->tx_desc_area[tx_index]; 922 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; 923 924 /* 925 * The last fragment will generate an interrupt 926 * which will free the skb on TX completion. 927 */ 928 if (frag == nr_frags - 1) { 929 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 930 ZERO_PADDING | TX_LAST_DESC | 931 TX_ENABLE_INTERRUPT; 932 } else { 933 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 934 } 935 936 desc->l4i_chk = 0; 937 desc->byte_cnt = skb_frag_size(this_frag); 938 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 939 this_frag, 0, desc->byte_cnt, 940 DMA_TO_DEVICE); 941 } 942 } 943 944 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, 945 struct net_device *dev) 946 { 947 struct mv643xx_eth_private *mp = txq_to_mp(txq); 948 int nr_frags = skb_shinfo(skb)->nr_frags; 949 int tx_index; 950 struct tx_desc *desc; 951 u32 cmd_sts; 952 u16 l4i_chk; 953 int length, ret; 954 955 cmd_sts = 0; 956 l4i_chk = 0; 957 958 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 959 if (net_ratelimit()) 960 netdev_err(dev, "tx queue full?!\n"); 961 return -EBUSY; 962 } 963 964 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); 965 if (ret) 966 return ret; 967 cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 968 969 tx_index = txq->tx_curr_desc++; 970 if (txq->tx_curr_desc == txq->tx_ring_size) 971 txq->tx_curr_desc = 0; 972 desc = &txq->tx_desc_area[tx_index]; 973 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; 974 975 if (nr_frags) { 976 txq_submit_frag_skb(txq, skb); 977 length = skb_headlen(skb); 978 } else { 979 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; 980 length = skb->len; 981 } 982 983 desc->l4i_chk = l4i_chk; 984 desc->byte_cnt = length; 985 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, 986 length, DMA_TO_DEVICE); 987 988 __skb_queue_tail(&txq->tx_skb, skb); 989 990 skb_tx_timestamp(skb); 991 992 /* ensure all other descriptors are written before first cmd_sts */ 993 wmb(); 994 desc->cmd_sts = cmd_sts; 995 996 /* clear TX_END status */ 997 mp->work_tx_end &= ~(1 << txq->index); 998 999 /* ensure all descriptors are written before poking hardware */ 1000 wmb(); 1001 txq_enable(txq); 1002 1003 txq->tx_desc_count += nr_frags + 1; 1004 1005 return 0; 1006 } 1007 1008 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1009 { 1010 struct mv643xx_eth_private *mp = netdev_priv(dev); 1011 int length, queue, ret; 1012 struct tx_queue *txq; 1013 struct netdev_queue *nq; 1014 1015 queue = skb_get_queue_mapping(skb); 1016 txq = mp->txq + queue; 1017 nq = netdev_get_tx_queue(dev, queue); 1018 1019 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 1020 netdev_printk(KERN_DEBUG, dev, 1021 "failed to linearize skb with tiny unaligned fragment\n"); 1022 return NETDEV_TX_BUSY; 1023 } 1024 1025 length = skb->len; 1026 1027 if (skb_is_gso(skb)) 1028 ret = txq_submit_tso(txq, skb, dev); 1029 else 1030 ret = txq_submit_skb(txq, skb, dev); 1031 if (!ret) { 1032 txq->tx_bytes += length; 1033 txq->tx_packets++; 1034 1035 if (txq->tx_desc_count >= txq->tx_stop_threshold) 1036 netif_tx_stop_queue(nq); 1037 } else { 1038 txq->tx_dropped++; 1039 dev_kfree_skb_any(skb); 1040 } 1041 1042 return NETDEV_TX_OK; 1043 } 1044 1045 1046 /* tx napi ******************************************************************/ 1047 static void txq_kick(struct tx_queue *txq) 1048 { 1049 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1050 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 1051 u32 hw_desc_ptr; 1052 u32 expected_ptr; 1053 1054 __netif_tx_lock(nq, smp_processor_id()); 1055 1056 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) 1057 goto out; 1058 1059 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); 1060 expected_ptr = (u32)txq->tx_desc_dma + 1061 txq->tx_curr_desc * sizeof(struct tx_desc); 1062 1063 if (hw_desc_ptr != expected_ptr) 1064 txq_enable(txq); 1065 1066 out: 1067 __netif_tx_unlock(nq); 1068 1069 mp->work_tx_end &= ~(1 << txq->index); 1070 } 1071 1072 static int txq_reclaim(struct tx_queue *txq, int budget, int force) 1073 { 1074 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1075 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 1076 int reclaimed; 1077 1078 __netif_tx_lock_bh(nq); 1079 1080 reclaimed = 0; 1081 while (reclaimed < budget && txq->tx_desc_count > 0) { 1082 int tx_index; 1083 struct tx_desc *desc; 1084 u32 cmd_sts; 1085 char desc_dma_map; 1086 1087 tx_index = txq->tx_used_desc; 1088 desc = &txq->tx_desc_area[tx_index]; 1089 desc_dma_map = txq->tx_desc_mapping[tx_index]; 1090 1091 cmd_sts = desc->cmd_sts; 1092 1093 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 1094 if (!force) 1095 break; 1096 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 1097 } 1098 1099 txq->tx_used_desc = tx_index + 1; 1100 if (txq->tx_used_desc == txq->tx_ring_size) 1101 txq->tx_used_desc = 0; 1102 1103 reclaimed++; 1104 txq->tx_desc_count--; 1105 1106 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { 1107 1108 if (desc_dma_map == DESC_DMA_MAP_PAGE) 1109 dma_unmap_page(mp->dev->dev.parent, 1110 desc->buf_ptr, 1111 desc->byte_cnt, 1112 DMA_TO_DEVICE); 1113 else 1114 dma_unmap_single(mp->dev->dev.parent, 1115 desc->buf_ptr, 1116 desc->byte_cnt, 1117 DMA_TO_DEVICE); 1118 } 1119 1120 if (cmd_sts & TX_ENABLE_INTERRUPT) { 1121 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); 1122 1123 if (!WARN_ON(!skb)) 1124 dev_consume_skb_any(skb); 1125 } 1126 1127 if (cmd_sts & ERROR_SUMMARY) { 1128 netdev_info(mp->dev, "tx error\n"); 1129 mp->dev->stats.tx_errors++; 1130 } 1131 1132 } 1133 1134 __netif_tx_unlock_bh(nq); 1135 1136 if (reclaimed < budget) 1137 mp->work_tx &= ~(1 << txq->index); 1138 1139 return reclaimed; 1140 } 1141 1142 1143 /* tx rate control **********************************************************/ 1144 /* 1145 * Set total maximum TX rate (shared by all TX queues for this port) 1146 * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 1147 */ 1148 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) 1149 { 1150 int token_rate; 1151 int mtu; 1152 int bucket_size; 1153 1154 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); 1155 if (token_rate > 1023) 1156 token_rate = 1023; 1157 1158 mtu = (mp->dev->mtu + 255) >> 8; 1159 if (mtu > 63) 1160 mtu = 63; 1161 1162 bucket_size = (burst + 255) >> 8; 1163 if (bucket_size > 65535) 1164 bucket_size = 65535; 1165 1166 switch (mp->shared->tx_bw_control) { 1167 case TX_BW_CONTROL_OLD_LAYOUT: 1168 wrlp(mp, TX_BW_RATE, token_rate); 1169 wrlp(mp, TX_BW_MTU, mtu); 1170 wrlp(mp, TX_BW_BURST, bucket_size); 1171 break; 1172 case TX_BW_CONTROL_NEW_LAYOUT: 1173 wrlp(mp, TX_BW_RATE_MOVED, token_rate); 1174 wrlp(mp, TX_BW_MTU_MOVED, mtu); 1175 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); 1176 break; 1177 } 1178 } 1179 1180 static void txq_set_rate(struct tx_queue *txq, int rate, int burst) 1181 { 1182 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1183 int token_rate; 1184 int bucket_size; 1185 1186 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); 1187 if (token_rate > 1023) 1188 token_rate = 1023; 1189 1190 bucket_size = (burst + 255) >> 8; 1191 if (bucket_size > 65535) 1192 bucket_size = 65535; 1193 1194 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); 1195 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); 1196 } 1197 1198 static void txq_set_fixed_prio_mode(struct tx_queue *txq) 1199 { 1200 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1201 int off; 1202 u32 val; 1203 1204 /* 1205 * Turn on fixed priority mode. 1206 */ 1207 off = 0; 1208 switch (mp->shared->tx_bw_control) { 1209 case TX_BW_CONTROL_OLD_LAYOUT: 1210 off = TXQ_FIX_PRIO_CONF; 1211 break; 1212 case TX_BW_CONTROL_NEW_LAYOUT: 1213 off = TXQ_FIX_PRIO_CONF_MOVED; 1214 break; 1215 } 1216 1217 if (off) { 1218 val = rdlp(mp, off); 1219 val |= 1 << txq->index; 1220 wrlp(mp, off, val); 1221 } 1222 } 1223 1224 1225 /* mii management interface *************************************************/ 1226 static void mv643xx_eth_adjust_link(struct net_device *dev) 1227 { 1228 struct mv643xx_eth_private *mp = netdev_priv(dev); 1229 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 1230 u32 autoneg_disable = FORCE_LINK_PASS | 1231 DISABLE_AUTO_NEG_SPEED_GMII | 1232 DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 1233 DISABLE_AUTO_NEG_FOR_DUPLEX; 1234 1235 if (dev->phydev->autoneg == AUTONEG_ENABLE) { 1236 /* enable auto negotiation */ 1237 pscr &= ~autoneg_disable; 1238 goto out_write; 1239 } 1240 1241 pscr |= autoneg_disable; 1242 1243 if (dev->phydev->speed == SPEED_1000) { 1244 /* force gigabit, half duplex not supported */ 1245 pscr |= SET_GMII_SPEED_TO_1000; 1246 pscr |= SET_FULL_DUPLEX_MODE; 1247 goto out_write; 1248 } 1249 1250 pscr &= ~SET_GMII_SPEED_TO_1000; 1251 1252 if (dev->phydev->speed == SPEED_100) 1253 pscr |= SET_MII_SPEED_TO_100; 1254 else 1255 pscr &= ~SET_MII_SPEED_TO_100; 1256 1257 if (dev->phydev->duplex == DUPLEX_FULL) 1258 pscr |= SET_FULL_DUPLEX_MODE; 1259 else 1260 pscr &= ~SET_FULL_DUPLEX_MODE; 1261 1262 out_write: 1263 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 1264 } 1265 1266 /* statistics ***************************************************************/ 1267 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1268 { 1269 struct mv643xx_eth_private *mp = netdev_priv(dev); 1270 struct net_device_stats *stats = &dev->stats; 1271 unsigned long tx_packets = 0; 1272 unsigned long tx_bytes = 0; 1273 unsigned long tx_dropped = 0; 1274 int i; 1275 1276 for (i = 0; i < mp->txq_count; i++) { 1277 struct tx_queue *txq = mp->txq + i; 1278 1279 tx_packets += txq->tx_packets; 1280 tx_bytes += txq->tx_bytes; 1281 tx_dropped += txq->tx_dropped; 1282 } 1283 1284 stats->tx_packets = tx_packets; 1285 stats->tx_bytes = tx_bytes; 1286 stats->tx_dropped = tx_dropped; 1287 1288 return stats; 1289 } 1290 1291 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1292 { 1293 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1294 } 1295 1296 static void mib_counters_clear(struct mv643xx_eth_private *mp) 1297 { 1298 int i; 1299 1300 for (i = 0; i < 0x80; i += 4) 1301 mib_read(mp, i); 1302 1303 /* Clear non MIB hw counters also */ 1304 rdlp(mp, RX_DISCARD_FRAME_CNT); 1305 rdlp(mp, RX_OVERRUN_FRAME_CNT); 1306 } 1307 1308 static void mib_counters_update(struct mv643xx_eth_private *mp) 1309 { 1310 struct mib_counters *p = &mp->mib_counters; 1311 1312 spin_lock_bh(&mp->mib_counters_lock); 1313 p->good_octets_received += mib_read(mp, 0x00); 1314 p->bad_octets_received += mib_read(mp, 0x08); 1315 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1316 p->good_frames_received += mib_read(mp, 0x10); 1317 p->bad_frames_received += mib_read(mp, 0x14); 1318 p->broadcast_frames_received += mib_read(mp, 0x18); 1319 p->multicast_frames_received += mib_read(mp, 0x1c); 1320 p->frames_64_octets += mib_read(mp, 0x20); 1321 p->frames_65_to_127_octets += mib_read(mp, 0x24); 1322 p->frames_128_to_255_octets += mib_read(mp, 0x28); 1323 p->frames_256_to_511_octets += mib_read(mp, 0x2c); 1324 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1325 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1326 p->good_octets_sent += mib_read(mp, 0x38); 1327 p->good_frames_sent += mib_read(mp, 0x40); 1328 p->excessive_collision += mib_read(mp, 0x44); 1329 p->multicast_frames_sent += mib_read(mp, 0x48); 1330 p->broadcast_frames_sent += mib_read(mp, 0x4c); 1331 p->unrec_mac_control_received += mib_read(mp, 0x50); 1332 p->fc_sent += mib_read(mp, 0x54); 1333 p->good_fc_received += mib_read(mp, 0x58); 1334 p->bad_fc_received += mib_read(mp, 0x5c); 1335 p->undersize_received += mib_read(mp, 0x60); 1336 p->fragments_received += mib_read(mp, 0x64); 1337 p->oversize_received += mib_read(mp, 0x68); 1338 p->jabber_received += mib_read(mp, 0x6c); 1339 p->mac_receive_error += mib_read(mp, 0x70); 1340 p->bad_crc_event += mib_read(mp, 0x74); 1341 p->collision += mib_read(mp, 0x78); 1342 p->late_collision += mib_read(mp, 0x7c); 1343 /* Non MIB hardware counters */ 1344 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1345 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1346 spin_unlock_bh(&mp->mib_counters_lock); 1347 } 1348 1349 static void mib_counters_timer_wrapper(struct timer_list *t) 1350 { 1351 struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); 1352 mib_counters_update(mp); 1353 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1354 } 1355 1356 1357 /* interrupt coalescing *****************************************************/ 1358 /* 1359 * Hardware coalescing parameters are set in units of 64 t_clk 1360 * cycles. I.e.: 1361 * 1362 * coal_delay_in_usec = 64000000 * register_value / t_clk_rate 1363 * 1364 * register_value = coal_delay_in_usec * t_clk_rate / 64000000 1365 * 1366 * In the ->set*() methods, we round the computed register value 1367 * to the nearest integer. 1368 */ 1369 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) 1370 { 1371 u32 val = rdlp(mp, SDMA_CONFIG); 1372 u64 temp; 1373 1374 if (mp->shared->extended_rx_coal_limit) 1375 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); 1376 else 1377 temp = (val & 0x003fff00) >> 8; 1378 1379 temp *= 64000000; 1380 temp += mp->t_clk / 2; 1381 do_div(temp, mp->t_clk); 1382 1383 return (unsigned int)temp; 1384 } 1385 1386 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1387 { 1388 u64 temp; 1389 u32 val; 1390 1391 temp = (u64)usec * mp->t_clk; 1392 temp += 31999999; 1393 do_div(temp, 64000000); 1394 1395 val = rdlp(mp, SDMA_CONFIG); 1396 if (mp->shared->extended_rx_coal_limit) { 1397 if (temp > 0xffff) 1398 temp = 0xffff; 1399 val &= ~0x023fff80; 1400 val |= (temp & 0x8000) << 10; 1401 val |= (temp & 0x7fff) << 7; 1402 } else { 1403 if (temp > 0x3fff) 1404 temp = 0x3fff; 1405 val &= ~0x003fff00; 1406 val |= (temp & 0x3fff) << 8; 1407 } 1408 wrlp(mp, SDMA_CONFIG, val); 1409 } 1410 1411 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) 1412 { 1413 u64 temp; 1414 1415 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1416 temp *= 64000000; 1417 temp += mp->t_clk / 2; 1418 do_div(temp, mp->t_clk); 1419 1420 return (unsigned int)temp; 1421 } 1422 1423 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1424 { 1425 u64 temp; 1426 1427 temp = (u64)usec * mp->t_clk; 1428 temp += 31999999; 1429 do_div(temp, 64000000); 1430 1431 if (temp > 0x3fff) 1432 temp = 0x3fff; 1433 1434 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); 1435 } 1436 1437 1438 /* ethtool ******************************************************************/ 1439 struct mv643xx_eth_stats { 1440 char stat_string[ETH_GSTRING_LEN]; 1441 int sizeof_stat; 1442 int netdev_off; 1443 int mp_off; 1444 }; 1445 1446 #define SSTAT(m) \ 1447 { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1448 offsetof(struct net_device, stats.m), -1 } 1449 1450 #define MIBSTAT(m) \ 1451 { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1452 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1453 1454 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1455 SSTAT(rx_packets), 1456 SSTAT(tx_packets), 1457 SSTAT(rx_bytes), 1458 SSTAT(tx_bytes), 1459 SSTAT(rx_errors), 1460 SSTAT(tx_errors), 1461 SSTAT(rx_dropped), 1462 SSTAT(tx_dropped), 1463 MIBSTAT(good_octets_received), 1464 MIBSTAT(bad_octets_received), 1465 MIBSTAT(internal_mac_transmit_err), 1466 MIBSTAT(good_frames_received), 1467 MIBSTAT(bad_frames_received), 1468 MIBSTAT(broadcast_frames_received), 1469 MIBSTAT(multicast_frames_received), 1470 MIBSTAT(frames_64_octets), 1471 MIBSTAT(frames_65_to_127_octets), 1472 MIBSTAT(frames_128_to_255_octets), 1473 MIBSTAT(frames_256_to_511_octets), 1474 MIBSTAT(frames_512_to_1023_octets), 1475 MIBSTAT(frames_1024_to_max_octets), 1476 MIBSTAT(good_octets_sent), 1477 MIBSTAT(good_frames_sent), 1478 MIBSTAT(excessive_collision), 1479 MIBSTAT(multicast_frames_sent), 1480 MIBSTAT(broadcast_frames_sent), 1481 MIBSTAT(unrec_mac_control_received), 1482 MIBSTAT(fc_sent), 1483 MIBSTAT(good_fc_received), 1484 MIBSTAT(bad_fc_received), 1485 MIBSTAT(undersize_received), 1486 MIBSTAT(fragments_received), 1487 MIBSTAT(oversize_received), 1488 MIBSTAT(jabber_received), 1489 MIBSTAT(mac_receive_error), 1490 MIBSTAT(bad_crc_event), 1491 MIBSTAT(collision), 1492 MIBSTAT(late_collision), 1493 MIBSTAT(rx_discard), 1494 MIBSTAT(rx_overrun), 1495 }; 1496 1497 static int 1498 mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, 1499 struct ethtool_link_ksettings *cmd) 1500 { 1501 struct net_device *dev = mp->dev; 1502 u32 supported, advertising; 1503 1504 phy_ethtool_ksettings_get(dev->phydev, cmd); 1505 1506 /* 1507 * The MAC does not support 1000baseT_Half. 1508 */ 1509 ethtool_convert_link_mode_to_legacy_u32(&supported, 1510 cmd->link_modes.supported); 1511 ethtool_convert_link_mode_to_legacy_u32(&advertising, 1512 cmd->link_modes.advertising); 1513 supported &= ~SUPPORTED_1000baseT_Half; 1514 advertising &= ~ADVERTISED_1000baseT_Half; 1515 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1516 supported); 1517 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1518 advertising); 1519 1520 return 0; 1521 } 1522 1523 static int 1524 mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, 1525 struct ethtool_link_ksettings *cmd) 1526 { 1527 u32 port_status; 1528 u32 supported, advertising; 1529 1530 port_status = rdlp(mp, PORT_STATUS); 1531 1532 supported = SUPPORTED_MII; 1533 advertising = ADVERTISED_MII; 1534 switch (port_status & PORT_SPEED_MASK) { 1535 case PORT_SPEED_10: 1536 cmd->base.speed = SPEED_10; 1537 break; 1538 case PORT_SPEED_100: 1539 cmd->base.speed = SPEED_100; 1540 break; 1541 case PORT_SPEED_1000: 1542 cmd->base.speed = SPEED_1000; 1543 break; 1544 default: 1545 cmd->base.speed = -1; 1546 break; 1547 } 1548 cmd->base.duplex = (port_status & FULL_DUPLEX) ? 1549 DUPLEX_FULL : DUPLEX_HALF; 1550 cmd->base.port = PORT_MII; 1551 cmd->base.phy_address = 0; 1552 cmd->base.autoneg = AUTONEG_DISABLE; 1553 1554 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1555 supported); 1556 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1557 advertising); 1558 1559 return 0; 1560 } 1561 1562 static void 1563 mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1564 { 1565 wol->supported = 0; 1566 wol->wolopts = 0; 1567 if (dev->phydev) 1568 phy_ethtool_get_wol(dev->phydev, wol); 1569 } 1570 1571 static int 1572 mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 1573 { 1574 int err; 1575 1576 if (!dev->phydev) 1577 return -EOPNOTSUPP; 1578 1579 err = phy_ethtool_set_wol(dev->phydev, wol); 1580 /* Given that mv643xx_eth works without the marvell-specific PHY driver, 1581 * this debugging hint is useful to have. 1582 */ 1583 if (err == -EOPNOTSUPP) 1584 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); 1585 return err; 1586 } 1587 1588 static int 1589 mv643xx_eth_get_link_ksettings(struct net_device *dev, 1590 struct ethtool_link_ksettings *cmd) 1591 { 1592 struct mv643xx_eth_private *mp = netdev_priv(dev); 1593 1594 if (dev->phydev) 1595 return mv643xx_eth_get_link_ksettings_phy(mp, cmd); 1596 else 1597 return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); 1598 } 1599 1600 static int 1601 mv643xx_eth_set_link_ksettings(struct net_device *dev, 1602 const struct ethtool_link_ksettings *cmd) 1603 { 1604 struct ethtool_link_ksettings c = *cmd; 1605 u32 advertising; 1606 int ret; 1607 1608 if (!dev->phydev) 1609 return -EINVAL; 1610 1611 /* 1612 * The MAC does not support 1000baseT_Half. 1613 */ 1614 ethtool_convert_link_mode_to_legacy_u32(&advertising, 1615 c.link_modes.advertising); 1616 advertising &= ~ADVERTISED_1000baseT_Half; 1617 ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising, 1618 advertising); 1619 1620 ret = phy_ethtool_ksettings_set(dev->phydev, &c); 1621 if (!ret) 1622 mv643xx_eth_adjust_link(dev); 1623 return ret; 1624 } 1625 1626 static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1627 struct ethtool_drvinfo *drvinfo) 1628 { 1629 strlcpy(drvinfo->driver, mv643xx_eth_driver_name, 1630 sizeof(drvinfo->driver)); 1631 strlcpy(drvinfo->version, mv643xx_eth_driver_version, 1632 sizeof(drvinfo->version)); 1633 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); 1634 strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); 1635 } 1636 1637 static int 1638 mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1639 { 1640 struct mv643xx_eth_private *mp = netdev_priv(dev); 1641 1642 ec->rx_coalesce_usecs = get_rx_coal(mp); 1643 ec->tx_coalesce_usecs = get_tx_coal(mp); 1644 1645 return 0; 1646 } 1647 1648 static int 1649 mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1650 { 1651 struct mv643xx_eth_private *mp = netdev_priv(dev); 1652 1653 set_rx_coal(mp, ec->rx_coalesce_usecs); 1654 set_tx_coal(mp, ec->tx_coalesce_usecs); 1655 1656 return 0; 1657 } 1658 1659 static void 1660 mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1661 { 1662 struct mv643xx_eth_private *mp = netdev_priv(dev); 1663 1664 er->rx_max_pending = 4096; 1665 er->tx_max_pending = 4096; 1666 1667 er->rx_pending = mp->rx_ring_size; 1668 er->tx_pending = mp->tx_ring_size; 1669 } 1670 1671 static int 1672 mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1673 { 1674 struct mv643xx_eth_private *mp = netdev_priv(dev); 1675 1676 if (er->rx_mini_pending || er->rx_jumbo_pending) 1677 return -EINVAL; 1678 1679 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1680 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, 1681 MV643XX_MAX_SKB_DESCS * 2, 4096); 1682 if (mp->tx_ring_size != er->tx_pending) 1683 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 1684 mp->tx_ring_size, er->tx_pending); 1685 1686 if (netif_running(dev)) { 1687 mv643xx_eth_stop(dev); 1688 if (mv643xx_eth_open(dev)) { 1689 netdev_err(dev, 1690 "fatal error on re-opening device after ring param change\n"); 1691 return -ENOMEM; 1692 } 1693 } 1694 1695 return 0; 1696 } 1697 1698 1699 static int 1700 mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) 1701 { 1702 struct mv643xx_eth_private *mp = netdev_priv(dev); 1703 bool rx_csum = features & NETIF_F_RXCSUM; 1704 1705 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1706 1707 return 0; 1708 } 1709 1710 static void mv643xx_eth_get_strings(struct net_device *dev, 1711 uint32_t stringset, uint8_t *data) 1712 { 1713 int i; 1714 1715 if (stringset == ETH_SS_STATS) { 1716 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1717 memcpy(data + i * ETH_GSTRING_LEN, 1718 mv643xx_eth_stats[i].stat_string, 1719 ETH_GSTRING_LEN); 1720 } 1721 } 1722 } 1723 1724 static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, 1725 struct ethtool_stats *stats, 1726 uint64_t *data) 1727 { 1728 struct mv643xx_eth_private *mp = netdev_priv(dev); 1729 int i; 1730 1731 mv643xx_eth_get_stats(dev); 1732 mib_counters_update(mp); 1733 1734 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1735 const struct mv643xx_eth_stats *stat; 1736 void *p; 1737 1738 stat = mv643xx_eth_stats + i; 1739 1740 if (stat->netdev_off >= 0) 1741 p = ((void *)mp->dev) + stat->netdev_off; 1742 else 1743 p = ((void *)mp) + stat->mp_off; 1744 1745 data[i] = (stat->sizeof_stat == 8) ? 1746 *(uint64_t *)p : *(uint32_t *)p; 1747 } 1748 } 1749 1750 static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1751 { 1752 if (sset == ETH_SS_STATS) 1753 return ARRAY_SIZE(mv643xx_eth_stats); 1754 1755 return -EOPNOTSUPP; 1756 } 1757 1758 static const struct ethtool_ops mv643xx_eth_ethtool_ops = { 1759 .get_drvinfo = mv643xx_eth_get_drvinfo, 1760 .nway_reset = phy_ethtool_nway_reset, 1761 .get_link = ethtool_op_get_link, 1762 .get_coalesce = mv643xx_eth_get_coalesce, 1763 .set_coalesce = mv643xx_eth_set_coalesce, 1764 .get_ringparam = mv643xx_eth_get_ringparam, 1765 .set_ringparam = mv643xx_eth_set_ringparam, 1766 .get_strings = mv643xx_eth_get_strings, 1767 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1768 .get_sset_count = mv643xx_eth_get_sset_count, 1769 .get_ts_info = ethtool_op_get_ts_info, 1770 .get_wol = mv643xx_eth_get_wol, 1771 .set_wol = mv643xx_eth_set_wol, 1772 .get_link_ksettings = mv643xx_eth_get_link_ksettings, 1773 .set_link_ksettings = mv643xx_eth_set_link_ksettings, 1774 }; 1775 1776 1777 /* address handling *********************************************************/ 1778 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1779 { 1780 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); 1781 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); 1782 1783 addr[0] = (mac_h >> 24) & 0xff; 1784 addr[1] = (mac_h >> 16) & 0xff; 1785 addr[2] = (mac_h >> 8) & 0xff; 1786 addr[3] = mac_h & 0xff; 1787 addr[4] = (mac_l >> 8) & 0xff; 1788 addr[5] = mac_l & 0xff; 1789 } 1790 1791 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1792 { 1793 wrlp(mp, MAC_ADDR_HIGH, 1794 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); 1795 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); 1796 } 1797 1798 static u32 uc_addr_filter_mask(struct net_device *dev) 1799 { 1800 struct netdev_hw_addr *ha; 1801 u32 nibbles; 1802 1803 if (dev->flags & IFF_PROMISC) 1804 return 0; 1805 1806 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1807 netdev_for_each_uc_addr(ha, dev) { 1808 if (memcmp(dev->dev_addr, ha->addr, 5)) 1809 return 0; 1810 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1811 return 0; 1812 1813 nibbles |= 1 << (ha->addr[5] & 0x0f); 1814 } 1815 1816 return nibbles; 1817 } 1818 1819 static void mv643xx_eth_program_unicast_filter(struct net_device *dev) 1820 { 1821 struct mv643xx_eth_private *mp = netdev_priv(dev); 1822 u32 port_config; 1823 u32 nibbles; 1824 int i; 1825 1826 uc_addr_set(mp, dev->dev_addr); 1827 1828 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; 1829 1830 nibbles = uc_addr_filter_mask(dev); 1831 if (!nibbles) { 1832 port_config |= UNICAST_PROMISCUOUS_MODE; 1833 nibbles = 0xffff; 1834 } 1835 1836 for (i = 0; i < 16; i += 4) { 1837 int off = UNICAST_TABLE(mp->port_num) + i; 1838 u32 v; 1839 1840 v = 0; 1841 if (nibbles & 1) 1842 v |= 0x00000001; 1843 if (nibbles & 2) 1844 v |= 0x00000100; 1845 if (nibbles & 4) 1846 v |= 0x00010000; 1847 if (nibbles & 8) 1848 v |= 0x01000000; 1849 nibbles >>= 4; 1850 1851 wrl(mp, off, v); 1852 } 1853 1854 wrlp(mp, PORT_CONFIG, port_config); 1855 } 1856 1857 static int addr_crc(unsigned char *addr) 1858 { 1859 int crc = 0; 1860 int i; 1861 1862 for (i = 0; i < 6; i++) { 1863 int j; 1864 1865 crc = (crc ^ addr[i]) << 8; 1866 for (j = 7; j >= 0; j--) { 1867 if (crc & (0x100 << j)) 1868 crc ^= 0x107 << j; 1869 } 1870 } 1871 1872 return crc; 1873 } 1874 1875 static void mv643xx_eth_program_multicast_filter(struct net_device *dev) 1876 { 1877 struct mv643xx_eth_private *mp = netdev_priv(dev); 1878 u32 *mc_spec; 1879 u32 *mc_other; 1880 struct netdev_hw_addr *ha; 1881 int i; 1882 1883 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) 1884 goto promiscuous; 1885 1886 /* Allocate both mc_spec and mc_other tables */ 1887 mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC); 1888 if (!mc_spec) 1889 goto promiscuous; 1890 mc_other = &mc_spec[64]; 1891 1892 netdev_for_each_mc_addr(ha, dev) { 1893 u8 *a = ha->addr; 1894 u32 *table; 1895 u8 entry; 1896 1897 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1898 table = mc_spec; 1899 entry = a[5]; 1900 } else { 1901 table = mc_other; 1902 entry = addr_crc(a); 1903 } 1904 1905 table[entry >> 2] |= 1 << (8 * (entry & 3)); 1906 } 1907 1908 for (i = 0; i < 64; i++) { 1909 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 1910 mc_spec[i]); 1911 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 1912 mc_other[i]); 1913 } 1914 1915 kfree(mc_spec); 1916 return; 1917 1918 promiscuous: 1919 for (i = 0; i < 64; i++) { 1920 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 1921 0x01010101u); 1922 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 1923 0x01010101u); 1924 } 1925 } 1926 1927 static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1928 { 1929 mv643xx_eth_program_unicast_filter(dev); 1930 mv643xx_eth_program_multicast_filter(dev); 1931 } 1932 1933 static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1934 { 1935 struct sockaddr *sa = addr; 1936 1937 if (!is_valid_ether_addr(sa->sa_data)) 1938 return -EADDRNOTAVAIL; 1939 1940 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); 1941 1942 netif_addr_lock_bh(dev); 1943 mv643xx_eth_program_unicast_filter(dev); 1944 netif_addr_unlock_bh(dev); 1945 1946 return 0; 1947 } 1948 1949 1950 /* rx/tx queue initialisation ***********************************************/ 1951 static int rxq_init(struct mv643xx_eth_private *mp, int index) 1952 { 1953 struct rx_queue *rxq = mp->rxq + index; 1954 struct rx_desc *rx_desc; 1955 int size; 1956 int i; 1957 1958 rxq->index = index; 1959 1960 rxq->rx_ring_size = mp->rx_ring_size; 1961 1962 rxq->rx_desc_count = 0; 1963 rxq->rx_curr_desc = 0; 1964 rxq->rx_used_desc = 0; 1965 1966 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1967 1968 if (index == 0 && size <= mp->rx_desc_sram_size) { 1969 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1970 mp->rx_desc_sram_size); 1971 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1972 } else { 1973 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1974 size, &rxq->rx_desc_dma, 1975 GFP_KERNEL); 1976 } 1977 1978 if (rxq->rx_desc_area == NULL) { 1979 netdev_err(mp->dev, 1980 "can't allocate rx ring (%d bytes)\n", size); 1981 goto out; 1982 } 1983 memset(rxq->rx_desc_area, 0, size); 1984 1985 rxq->rx_desc_area_size = size; 1986 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), 1987 GFP_KERNEL); 1988 if (rxq->rx_skb == NULL) 1989 goto out_free; 1990 1991 rx_desc = rxq->rx_desc_area; 1992 for (i = 0; i < rxq->rx_ring_size; i++) { 1993 int nexti; 1994 1995 nexti = i + 1; 1996 if (nexti == rxq->rx_ring_size) 1997 nexti = 0; 1998 1999 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 2000 nexti * sizeof(struct rx_desc); 2001 } 2002 2003 return 0; 2004 2005 2006 out_free: 2007 if (index == 0 && size <= mp->rx_desc_sram_size) 2008 iounmap(rxq->rx_desc_area); 2009 else 2010 dma_free_coherent(mp->dev->dev.parent, size, 2011 rxq->rx_desc_area, 2012 rxq->rx_desc_dma); 2013 2014 out: 2015 return -ENOMEM; 2016 } 2017 2018 static void rxq_deinit(struct rx_queue *rxq) 2019 { 2020 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 2021 int i; 2022 2023 rxq_disable(rxq); 2024 2025 for (i = 0; i < rxq->rx_ring_size; i++) { 2026 if (rxq->rx_skb[i]) { 2027 dev_consume_skb_any(rxq->rx_skb[i]); 2028 rxq->rx_desc_count--; 2029 } 2030 } 2031 2032 if (rxq->rx_desc_count) { 2033 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", 2034 rxq->rx_desc_count); 2035 } 2036 2037 if (rxq->index == 0 && 2038 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 2039 iounmap(rxq->rx_desc_area); 2040 else 2041 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, 2042 rxq->rx_desc_area, rxq->rx_desc_dma); 2043 2044 kfree(rxq->rx_skb); 2045 } 2046 2047 static int txq_init(struct mv643xx_eth_private *mp, int index) 2048 { 2049 struct tx_queue *txq = mp->txq + index; 2050 struct tx_desc *tx_desc; 2051 int size; 2052 int ret; 2053 int i; 2054 2055 txq->index = index; 2056 2057 txq->tx_ring_size = mp->tx_ring_size; 2058 2059 /* A queue must always have room for at least one skb. 2060 * Therefore, stop the queue when the free entries reaches 2061 * the maximum number of descriptors per skb. 2062 */ 2063 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; 2064 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 2065 2066 txq->tx_desc_count = 0; 2067 txq->tx_curr_desc = 0; 2068 txq->tx_used_desc = 0; 2069 2070 size = txq->tx_ring_size * sizeof(struct tx_desc); 2071 2072 if (index == 0 && size <= mp->tx_desc_sram_size) { 2073 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 2074 mp->tx_desc_sram_size); 2075 txq->tx_desc_dma = mp->tx_desc_sram_addr; 2076 } else { 2077 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 2078 size, &txq->tx_desc_dma, 2079 GFP_KERNEL); 2080 } 2081 2082 if (txq->tx_desc_area == NULL) { 2083 netdev_err(mp->dev, 2084 "can't allocate tx ring (%d bytes)\n", size); 2085 return -ENOMEM; 2086 } 2087 memset(txq->tx_desc_area, 0, size); 2088 2089 txq->tx_desc_area_size = size; 2090 2091 tx_desc = txq->tx_desc_area; 2092 for (i = 0; i < txq->tx_ring_size; i++) { 2093 struct tx_desc *txd = tx_desc + i; 2094 int nexti; 2095 2096 nexti = i + 1; 2097 if (nexti == txq->tx_ring_size) 2098 nexti = 0; 2099 2100 txd->cmd_sts = 0; 2101 txd->next_desc_ptr = txq->tx_desc_dma + 2102 nexti * sizeof(struct tx_desc); 2103 } 2104 2105 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), 2106 GFP_KERNEL); 2107 if (!txq->tx_desc_mapping) { 2108 ret = -ENOMEM; 2109 goto err_free_desc_area; 2110 } 2111 2112 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 2113 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, 2114 txq->tx_ring_size * TSO_HEADER_SIZE, 2115 &txq->tso_hdrs_dma, GFP_KERNEL); 2116 if (txq->tso_hdrs == NULL) { 2117 ret = -ENOMEM; 2118 goto err_free_desc_mapping; 2119 } 2120 skb_queue_head_init(&txq->tx_skb); 2121 2122 return 0; 2123 2124 err_free_desc_mapping: 2125 kfree(txq->tx_desc_mapping); 2126 err_free_desc_area: 2127 if (index == 0 && size <= mp->tx_desc_sram_size) 2128 iounmap(txq->tx_desc_area); 2129 else 2130 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2131 txq->tx_desc_area, txq->tx_desc_dma); 2132 return ret; 2133 } 2134 2135 static void txq_deinit(struct tx_queue *txq) 2136 { 2137 struct mv643xx_eth_private *mp = txq_to_mp(txq); 2138 2139 txq_disable(txq); 2140 txq_reclaim(txq, txq->tx_ring_size, 1); 2141 2142 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2143 2144 if (txq->index == 0 && 2145 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2146 iounmap(txq->tx_desc_area); 2147 else 2148 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2149 txq->tx_desc_area, txq->tx_desc_dma); 2150 kfree(txq->tx_desc_mapping); 2151 2152 if (txq->tso_hdrs) 2153 dma_free_coherent(mp->dev->dev.parent, 2154 txq->tx_ring_size * TSO_HEADER_SIZE, 2155 txq->tso_hdrs, txq->tso_hdrs_dma); 2156 } 2157 2158 2159 /* netdev ops and related ***************************************************/ 2160 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2161 { 2162 u32 int_cause; 2163 u32 int_cause_ext; 2164 2165 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2166 if (int_cause == 0) 2167 return 0; 2168 2169 int_cause_ext = 0; 2170 if (int_cause & INT_EXT) { 2171 int_cause &= ~INT_EXT; 2172 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2173 } 2174 2175 if (int_cause) { 2176 wrlp(mp, INT_CAUSE, ~int_cause); 2177 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2178 ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2179 mp->work_rx |= (int_cause & INT_RX) >> 2; 2180 } 2181 2182 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2183 if (int_cause_ext) { 2184 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2185 if (int_cause_ext & INT_EXT_LINK_PHY) 2186 mp->work_link = 1; 2187 mp->work_tx |= int_cause_ext & INT_EXT_TX; 2188 } 2189 2190 return 1; 2191 } 2192 2193 static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2194 { 2195 struct net_device *dev = (struct net_device *)dev_id; 2196 struct mv643xx_eth_private *mp = netdev_priv(dev); 2197 2198 if (unlikely(!mv643xx_eth_collect_events(mp))) 2199 return IRQ_NONE; 2200 2201 wrlp(mp, INT_MASK, 0); 2202 napi_schedule(&mp->napi); 2203 2204 return IRQ_HANDLED; 2205 } 2206 2207 static void handle_link_event(struct mv643xx_eth_private *mp) 2208 { 2209 struct net_device *dev = mp->dev; 2210 u32 port_status; 2211 int speed; 2212 int duplex; 2213 int fc; 2214 2215 port_status = rdlp(mp, PORT_STATUS); 2216 if (!(port_status & LINK_UP)) { 2217 if (netif_carrier_ok(dev)) { 2218 int i; 2219 2220 netdev_info(dev, "link down\n"); 2221 2222 netif_carrier_off(dev); 2223 2224 for (i = 0; i < mp->txq_count; i++) { 2225 struct tx_queue *txq = mp->txq + i; 2226 2227 txq_reclaim(txq, txq->tx_ring_size, 1); 2228 txq_reset_hw_ptr(txq); 2229 } 2230 } 2231 return; 2232 } 2233 2234 switch (port_status & PORT_SPEED_MASK) { 2235 case PORT_SPEED_10: 2236 speed = 10; 2237 break; 2238 case PORT_SPEED_100: 2239 speed = 100; 2240 break; 2241 case PORT_SPEED_1000: 2242 speed = 1000; 2243 break; 2244 default: 2245 speed = -1; 2246 break; 2247 } 2248 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2249 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2250 2251 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", 2252 speed, duplex ? "full" : "half", fc ? "en" : "dis"); 2253 2254 if (!netif_carrier_ok(dev)) 2255 netif_carrier_on(dev); 2256 } 2257 2258 static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2259 { 2260 struct mv643xx_eth_private *mp; 2261 int work_done; 2262 2263 mp = container_of(napi, struct mv643xx_eth_private, napi); 2264 2265 if (unlikely(mp->oom)) { 2266 mp->oom = 0; 2267 del_timer(&mp->rx_oom); 2268 } 2269 2270 work_done = 0; 2271 while (work_done < budget) { 2272 u8 queue_mask; 2273 int queue; 2274 int work_tbd; 2275 2276 if (mp->work_link) { 2277 mp->work_link = 0; 2278 handle_link_event(mp); 2279 work_done++; 2280 continue; 2281 } 2282 2283 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2284 if (likely(!mp->oom)) 2285 queue_mask |= mp->work_rx_refill; 2286 2287 if (!queue_mask) { 2288 if (mv643xx_eth_collect_events(mp)) 2289 continue; 2290 break; 2291 } 2292 2293 queue = fls(queue_mask) - 1; 2294 queue_mask = 1 << queue; 2295 2296 work_tbd = budget - work_done; 2297 if (work_tbd > 16) 2298 work_tbd = 16; 2299 2300 if (mp->work_tx_end & queue_mask) { 2301 txq_kick(mp->txq + queue); 2302 } else if (mp->work_tx & queue_mask) { 2303 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2304 txq_maybe_wake(mp->txq + queue); 2305 } else if (mp->work_rx & queue_mask) { 2306 work_done += rxq_process(mp->rxq + queue, work_tbd); 2307 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2308 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2309 } else { 2310 BUG(); 2311 } 2312 } 2313 2314 if (work_done < budget) { 2315 if (mp->oom) 2316 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2317 napi_complete_done(napi, work_done); 2318 wrlp(mp, INT_MASK, mp->int_mask); 2319 } 2320 2321 return work_done; 2322 } 2323 2324 static inline void oom_timer_wrapper(struct timer_list *t) 2325 { 2326 struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); 2327 2328 napi_schedule(&mp->napi); 2329 } 2330 2331 static void port_start(struct mv643xx_eth_private *mp) 2332 { 2333 struct net_device *dev = mp->dev; 2334 u32 pscr; 2335 int i; 2336 2337 /* 2338 * Perform PHY reset, if there is a PHY. 2339 */ 2340 if (dev->phydev) { 2341 struct ethtool_link_ksettings cmd; 2342 2343 mv643xx_eth_get_link_ksettings(dev, &cmd); 2344 phy_init_hw(dev->phydev); 2345 mv643xx_eth_set_link_ksettings( 2346 dev, (const struct ethtool_link_ksettings *)&cmd); 2347 phy_start(dev->phydev); 2348 } 2349 2350 /* 2351 * Configure basic link parameters. 2352 */ 2353 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2354 2355 pscr |= SERIAL_PORT_ENABLE; 2356 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2357 2358 pscr |= DO_NOT_FORCE_LINK_FAIL; 2359 if (!dev->phydev) 2360 pscr |= FORCE_LINK_PASS; 2361 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2362 2363 /* 2364 * Configure TX path and queues. 2365 */ 2366 tx_set_rate(mp, 1000000000, 16777216); 2367 for (i = 0; i < mp->txq_count; i++) { 2368 struct tx_queue *txq = mp->txq + i; 2369 2370 txq_reset_hw_ptr(txq); 2371 txq_set_rate(txq, 1000000000, 16777216); 2372 txq_set_fixed_prio_mode(txq); 2373 } 2374 2375 /* 2376 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2377 * frames to RX queue #0, and include the pseudo-header when 2378 * calculating receive checksums. 2379 */ 2380 mv643xx_eth_set_features(mp->dev, mp->dev->features); 2381 2382 /* 2383 * Treat BPDUs as normal multicasts, and disable partition mode. 2384 */ 2385 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2386 2387 /* 2388 * Add configured unicast addresses to address filter table. 2389 */ 2390 mv643xx_eth_program_unicast_filter(mp->dev); 2391 2392 /* 2393 * Enable the receive queues. 2394 */ 2395 for (i = 0; i < mp->rxq_count; i++) { 2396 struct rx_queue *rxq = mp->rxq + i; 2397 u32 addr; 2398 2399 addr = (u32)rxq->rx_desc_dma; 2400 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2401 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2402 2403 rxq_enable(rxq); 2404 } 2405 } 2406 2407 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2408 { 2409 int skb_size; 2410 2411 /* 2412 * Reserve 2+14 bytes for an ethernet header (the hardware 2413 * automatically prepends 2 bytes of dummy data to each 2414 * received packet), 16 bytes for up to four VLAN tags, and 2415 * 4 bytes for the trailing FCS -- 36 bytes total. 2416 */ 2417 skb_size = mp->dev->mtu + 36; 2418 2419 /* 2420 * Make sure that the skb size is a multiple of 8 bytes, as 2421 * the lower three bits of the receive descriptor's buffer 2422 * size field are ignored by the hardware. 2423 */ 2424 mp->skb_size = (skb_size + 7) & ~7; 2425 2426 /* 2427 * If NET_SKB_PAD is smaller than a cache line, 2428 * netdev_alloc_skb() will cause skb->data to be misaligned 2429 * to a cache line boundary. If this is the case, include 2430 * some extra space to allow re-aligning the data area. 2431 */ 2432 mp->skb_size += SKB_DMA_REALIGN; 2433 } 2434 2435 static int mv643xx_eth_open(struct net_device *dev) 2436 { 2437 struct mv643xx_eth_private *mp = netdev_priv(dev); 2438 int err; 2439 int i; 2440 2441 wrlp(mp, INT_CAUSE, 0); 2442 wrlp(mp, INT_CAUSE_EXT, 0); 2443 rdlp(mp, INT_CAUSE_EXT); 2444 2445 err = request_irq(dev->irq, mv643xx_eth_irq, 2446 IRQF_SHARED, dev->name, dev); 2447 if (err) { 2448 netdev_err(dev, "can't assign irq\n"); 2449 return -EAGAIN; 2450 } 2451 2452 mv643xx_eth_recalc_skb_size(mp); 2453 2454 napi_enable(&mp->napi); 2455 2456 mp->int_mask = INT_EXT; 2457 2458 for (i = 0; i < mp->rxq_count; i++) { 2459 err = rxq_init(mp, i); 2460 if (err) { 2461 while (--i >= 0) 2462 rxq_deinit(mp->rxq + i); 2463 goto out; 2464 } 2465 2466 rxq_refill(mp->rxq + i, INT_MAX); 2467 mp->int_mask |= INT_RX_0 << i; 2468 } 2469 2470 if (mp->oom) { 2471 mp->rx_oom.expires = jiffies + (HZ / 10); 2472 add_timer(&mp->rx_oom); 2473 } 2474 2475 for (i = 0; i < mp->txq_count; i++) { 2476 err = txq_init(mp, i); 2477 if (err) { 2478 while (--i >= 0) 2479 txq_deinit(mp->txq + i); 2480 goto out_free; 2481 } 2482 mp->int_mask |= INT_TX_END_0 << i; 2483 } 2484 2485 add_timer(&mp->mib_counters_timer); 2486 port_start(mp); 2487 2488 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2489 wrlp(mp, INT_MASK, mp->int_mask); 2490 2491 return 0; 2492 2493 2494 out_free: 2495 for (i = 0; i < mp->rxq_count; i++) 2496 rxq_deinit(mp->rxq + i); 2497 out: 2498 free_irq(dev->irq, dev); 2499 2500 return err; 2501 } 2502 2503 static void port_reset(struct mv643xx_eth_private *mp) 2504 { 2505 unsigned int data; 2506 int i; 2507 2508 for (i = 0; i < mp->rxq_count; i++) 2509 rxq_disable(mp->rxq + i); 2510 for (i = 0; i < mp->txq_count; i++) 2511 txq_disable(mp->txq + i); 2512 2513 while (1) { 2514 u32 ps = rdlp(mp, PORT_STATUS); 2515 2516 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2517 break; 2518 udelay(10); 2519 } 2520 2521 /* Reset the Enable bit in the Configuration Register */ 2522 data = rdlp(mp, PORT_SERIAL_CONTROL); 2523 data &= ~(SERIAL_PORT_ENABLE | 2524 DO_NOT_FORCE_LINK_FAIL | 2525 FORCE_LINK_PASS); 2526 wrlp(mp, PORT_SERIAL_CONTROL, data); 2527 } 2528 2529 static int mv643xx_eth_stop(struct net_device *dev) 2530 { 2531 struct mv643xx_eth_private *mp = netdev_priv(dev); 2532 int i; 2533 2534 wrlp(mp, INT_MASK_EXT, 0x00000000); 2535 wrlp(mp, INT_MASK, 0x00000000); 2536 rdlp(mp, INT_MASK); 2537 2538 napi_disable(&mp->napi); 2539 2540 del_timer_sync(&mp->rx_oom); 2541 2542 netif_carrier_off(dev); 2543 if (dev->phydev) 2544 phy_stop(dev->phydev); 2545 free_irq(dev->irq, dev); 2546 2547 port_reset(mp); 2548 mv643xx_eth_get_stats(dev); 2549 mib_counters_update(mp); 2550 del_timer_sync(&mp->mib_counters_timer); 2551 2552 for (i = 0; i < mp->rxq_count; i++) 2553 rxq_deinit(mp->rxq + i); 2554 for (i = 0; i < mp->txq_count; i++) 2555 txq_deinit(mp->txq + i); 2556 2557 return 0; 2558 } 2559 2560 static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2561 { 2562 int ret; 2563 2564 if (!dev->phydev) 2565 return -ENOTSUPP; 2566 2567 ret = phy_mii_ioctl(dev->phydev, ifr, cmd); 2568 if (!ret) 2569 mv643xx_eth_adjust_link(dev); 2570 return ret; 2571 } 2572 2573 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2574 { 2575 struct mv643xx_eth_private *mp = netdev_priv(dev); 2576 2577 dev->mtu = new_mtu; 2578 mv643xx_eth_recalc_skb_size(mp); 2579 tx_set_rate(mp, 1000000000, 16777216); 2580 2581 if (!netif_running(dev)) 2582 return 0; 2583 2584 /* 2585 * Stop and then re-open the interface. This will allocate RX 2586 * skbs of the new MTU. 2587 * There is a possible danger that the open will not succeed, 2588 * due to memory being full. 2589 */ 2590 mv643xx_eth_stop(dev); 2591 if (mv643xx_eth_open(dev)) { 2592 netdev_err(dev, 2593 "fatal error on re-opening device after MTU change\n"); 2594 } 2595 2596 return 0; 2597 } 2598 2599 static void tx_timeout_task(struct work_struct *ugly) 2600 { 2601 struct mv643xx_eth_private *mp; 2602 2603 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2604 if (netif_running(mp->dev)) { 2605 netif_tx_stop_all_queues(mp->dev); 2606 port_reset(mp); 2607 port_start(mp); 2608 netif_tx_wake_all_queues(mp->dev); 2609 } 2610 } 2611 2612 static void mv643xx_eth_tx_timeout(struct net_device *dev) 2613 { 2614 struct mv643xx_eth_private *mp = netdev_priv(dev); 2615 2616 netdev_info(dev, "tx timeout\n"); 2617 2618 schedule_work(&mp->tx_timeout_task); 2619 } 2620 2621 #ifdef CONFIG_NET_POLL_CONTROLLER 2622 static void mv643xx_eth_netpoll(struct net_device *dev) 2623 { 2624 struct mv643xx_eth_private *mp = netdev_priv(dev); 2625 2626 wrlp(mp, INT_MASK, 0x00000000); 2627 rdlp(mp, INT_MASK); 2628 2629 mv643xx_eth_irq(dev->irq, dev); 2630 2631 wrlp(mp, INT_MASK, mp->int_mask); 2632 } 2633 #endif 2634 2635 2636 /* platform glue ************************************************************/ 2637 static void 2638 mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2639 const struct mbus_dram_target_info *dram) 2640 { 2641 void __iomem *base = msp->base; 2642 u32 win_enable; 2643 u32 win_protect; 2644 int i; 2645 2646 for (i = 0; i < 6; i++) { 2647 writel(0, base + WINDOW_BASE(i)); 2648 writel(0, base + WINDOW_SIZE(i)); 2649 if (i < 4) 2650 writel(0, base + WINDOW_REMAP_HIGH(i)); 2651 } 2652 2653 win_enable = 0x3f; 2654 win_protect = 0; 2655 2656 for (i = 0; i < dram->num_cs; i++) { 2657 const struct mbus_dram_window *cs = dram->cs + i; 2658 2659 writel((cs->base & 0xffff0000) | 2660 (cs->mbus_attr << 8) | 2661 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2662 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2663 2664 win_enable &= ~(1 << i); 2665 win_protect |= 3 << (2 * i); 2666 } 2667 2668 writel(win_enable, base + WINDOW_BAR_ENABLE); 2669 msp->win_protect = win_protect; 2670 } 2671 2672 static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2673 { 2674 /* 2675 * Check whether we have a 14-bit coal limit field in bits 2676 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2677 * SDMA config register. 2678 */ 2679 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2680 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2681 msp->extended_rx_coal_limit = 1; 2682 else 2683 msp->extended_rx_coal_limit = 0; 2684 2685 /* 2686 * Check whether the MAC supports TX rate control, and if 2687 * yes, whether its associated registers are in the old or 2688 * the new place. 2689 */ 2690 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2691 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2692 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2693 } else { 2694 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2695 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2696 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2697 else 2698 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2699 } 2700 } 2701 2702 #if defined(CONFIG_OF) 2703 static const struct of_device_id mv643xx_eth_shared_ids[] = { 2704 { .compatible = "marvell,orion-eth", }, 2705 { .compatible = "marvell,kirkwood-eth", }, 2706 { } 2707 }; 2708 MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); 2709 #endif 2710 2711 #if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) 2712 #define mv643xx_eth_property(_np, _name, _v) \ 2713 do { \ 2714 u32 tmp; \ 2715 if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ 2716 _v = tmp; \ 2717 } while (0) 2718 2719 static struct platform_device *port_platdev[3]; 2720 2721 static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, 2722 struct device_node *pnp) 2723 { 2724 struct platform_device *ppdev; 2725 struct mv643xx_eth_platform_data ppd; 2726 struct resource res; 2727 const char *mac_addr; 2728 int ret; 2729 int dev_num = 0; 2730 2731 memset(&ppd, 0, sizeof(ppd)); 2732 ppd.shared = pdev; 2733 2734 memset(&res, 0, sizeof(res)); 2735 if (of_irq_to_resource(pnp, 0, &res) <= 0) { 2736 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); 2737 return -EINVAL; 2738 } 2739 2740 if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { 2741 dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); 2742 return -EINVAL; 2743 } 2744 2745 if (ppd.port_number >= 3) { 2746 dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); 2747 return -EINVAL; 2748 } 2749 2750 while (dev_num < 3 && port_platdev[dev_num]) 2751 dev_num++; 2752 2753 if (dev_num == 3) { 2754 dev_err(&pdev->dev, "too many ports registered\n"); 2755 return -EINVAL; 2756 } 2757 2758 mac_addr = of_get_mac_address(pnp); 2759 if (mac_addr) 2760 memcpy(ppd.mac_addr, mac_addr, ETH_ALEN); 2761 2762 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); 2763 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); 2764 mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); 2765 mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); 2766 mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); 2767 mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); 2768 2769 ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); 2770 if (!ppd.phy_node) { 2771 ppd.phy_addr = MV643XX_ETH_PHY_NONE; 2772 of_property_read_u32(pnp, "speed", &ppd.speed); 2773 of_property_read_u32(pnp, "duplex", &ppd.duplex); 2774 } 2775 2776 ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); 2777 if (!ppdev) 2778 return -ENOMEM; 2779 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 2780 ppdev->dev.of_node = pnp; 2781 2782 ret = platform_device_add_resources(ppdev, &res, 1); 2783 if (ret) 2784 goto port_err; 2785 2786 ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); 2787 if (ret) 2788 goto port_err; 2789 2790 ret = platform_device_add(ppdev); 2791 if (ret) 2792 goto port_err; 2793 2794 port_platdev[dev_num] = ppdev; 2795 2796 return 0; 2797 2798 port_err: 2799 platform_device_put(ppdev); 2800 return ret; 2801 } 2802 2803 static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) 2804 { 2805 struct mv643xx_eth_shared_platform_data *pd; 2806 struct device_node *pnp, *np = pdev->dev.of_node; 2807 int ret; 2808 2809 /* bail out if not registered from DT */ 2810 if (!np) 2811 return 0; 2812 2813 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); 2814 if (!pd) 2815 return -ENOMEM; 2816 pdev->dev.platform_data = pd; 2817 2818 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); 2819 2820 for_each_available_child_of_node(np, pnp) { 2821 ret = mv643xx_eth_shared_of_add_port(pdev, pnp); 2822 if (ret) { 2823 of_node_put(pnp); 2824 return ret; 2825 } 2826 } 2827 return 0; 2828 } 2829 2830 static void mv643xx_eth_shared_of_remove(void) 2831 { 2832 int n; 2833 2834 for (n = 0; n < 3; n++) { 2835 platform_device_del(port_platdev[n]); 2836 port_platdev[n] = NULL; 2837 } 2838 } 2839 #else 2840 static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) 2841 { 2842 return 0; 2843 } 2844 2845 static inline void mv643xx_eth_shared_of_remove(void) 2846 { 2847 } 2848 #endif 2849 2850 static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2851 { 2852 static int mv643xx_eth_version_printed; 2853 struct mv643xx_eth_shared_platform_data *pd; 2854 struct mv643xx_eth_shared_private *msp; 2855 const struct mbus_dram_target_info *dram; 2856 struct resource *res; 2857 int ret; 2858 2859 if (!mv643xx_eth_version_printed++) 2860 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", 2861 mv643xx_eth_driver_version); 2862 2863 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2864 if (res == NULL) 2865 return -EINVAL; 2866 2867 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); 2868 if (msp == NULL) 2869 return -ENOMEM; 2870 platform_set_drvdata(pdev, msp); 2871 2872 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 2873 if (msp->base == NULL) 2874 return -ENOMEM; 2875 2876 msp->clk = devm_clk_get(&pdev->dev, NULL); 2877 if (!IS_ERR(msp->clk)) 2878 clk_prepare_enable(msp->clk); 2879 2880 /* 2881 * (Re-)program MBUS remapping windows if we are asked to. 2882 */ 2883 dram = mv_mbus_dram_info(); 2884 if (dram) 2885 mv643xx_eth_conf_mbus_windows(msp, dram); 2886 2887 ret = mv643xx_eth_shared_of_probe(pdev); 2888 if (ret) 2889 return ret; 2890 pd = dev_get_platdata(&pdev->dev); 2891 2892 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2893 pd->tx_csum_limit : 9 * 1024; 2894 infer_hw_params(msp); 2895 2896 return 0; 2897 } 2898 2899 static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2900 { 2901 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2902 2903 mv643xx_eth_shared_of_remove(); 2904 if (!IS_ERR(msp->clk)) 2905 clk_disable_unprepare(msp->clk); 2906 return 0; 2907 } 2908 2909 static struct platform_driver mv643xx_eth_shared_driver = { 2910 .probe = mv643xx_eth_shared_probe, 2911 .remove = mv643xx_eth_shared_remove, 2912 .driver = { 2913 .name = MV643XX_ETH_SHARED_NAME, 2914 .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), 2915 }, 2916 }; 2917 2918 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2919 { 2920 int addr_shift = 5 * mp->port_num; 2921 u32 data; 2922 2923 data = rdl(mp, PHY_ADDR); 2924 data &= ~(0x1f << addr_shift); 2925 data |= (phy_addr & 0x1f) << addr_shift; 2926 wrl(mp, PHY_ADDR, data); 2927 } 2928 2929 static int phy_addr_get(struct mv643xx_eth_private *mp) 2930 { 2931 unsigned int data; 2932 2933 data = rdl(mp, PHY_ADDR); 2934 2935 return (data >> (5 * mp->port_num)) & 0x1f; 2936 } 2937 2938 static void set_params(struct mv643xx_eth_private *mp, 2939 struct mv643xx_eth_platform_data *pd) 2940 { 2941 struct net_device *dev = mp->dev; 2942 unsigned int tx_ring_size; 2943 2944 if (is_valid_ether_addr(pd->mac_addr)) 2945 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); 2946 else 2947 uc_addr_get(mp, dev->dev_addr); 2948 2949 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2950 if (pd->rx_queue_size) 2951 mp->rx_ring_size = pd->rx_queue_size; 2952 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2953 mp->rx_desc_sram_size = pd->rx_sram_size; 2954 2955 mp->rxq_count = pd->rx_queue_count ? : 1; 2956 2957 tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2958 if (pd->tx_queue_size) 2959 tx_ring_size = pd->tx_queue_size; 2960 2961 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, 2962 MV643XX_MAX_SKB_DESCS * 2, 4096); 2963 if (mp->tx_ring_size != tx_ring_size) 2964 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 2965 mp->tx_ring_size, tx_ring_size); 2966 2967 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2968 mp->tx_desc_sram_size = pd->tx_sram_size; 2969 2970 mp->txq_count = pd->tx_queue_count ? : 1; 2971 } 2972 2973 static int get_phy_mode(struct mv643xx_eth_private *mp) 2974 { 2975 struct device *dev = mp->dev->dev.parent; 2976 int iface = -1; 2977 2978 if (dev->of_node) 2979 iface = of_get_phy_mode(dev->of_node); 2980 2981 /* Historical default if unspecified. We could also read/write 2982 * the interface state in the PSC1 2983 */ 2984 if (iface < 0) 2985 iface = PHY_INTERFACE_MODE_GMII; 2986 return iface; 2987 } 2988 2989 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2990 int phy_addr) 2991 { 2992 struct phy_device *phydev; 2993 int start; 2994 int num; 2995 int i; 2996 char phy_id[MII_BUS_ID_SIZE + 3]; 2997 2998 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2999 start = phy_addr_get(mp) & 0x1f; 3000 num = 32; 3001 } else { 3002 start = phy_addr & 0x1f; 3003 num = 1; 3004 } 3005 3006 /* Attempt to connect to the PHY using orion-mdio */ 3007 phydev = ERR_PTR(-ENODEV); 3008 for (i = 0; i < num; i++) { 3009 int addr = (start + i) & 0x1f; 3010 3011 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 3012 "orion-mdio-mii", addr); 3013 3014 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, 3015 get_phy_mode(mp)); 3016 if (!IS_ERR(phydev)) { 3017 phy_addr_set(mp, addr); 3018 break; 3019 } 3020 } 3021 3022 return phydev; 3023 } 3024 3025 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 3026 { 3027 struct net_device *dev = mp->dev; 3028 struct phy_device *phy = dev->phydev; 3029 3030 if (speed == 0) { 3031 phy->autoneg = AUTONEG_ENABLE; 3032 phy->speed = 0; 3033 phy->duplex = 0; 3034 phy->advertising = phy->supported | ADVERTISED_Autoneg; 3035 } else { 3036 phy->autoneg = AUTONEG_DISABLE; 3037 phy->advertising = 0; 3038 phy->speed = speed; 3039 phy->duplex = duplex; 3040 } 3041 phy_start_aneg(phy); 3042 } 3043 3044 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 3045 { 3046 struct net_device *dev = mp->dev; 3047 u32 pscr; 3048 3049 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 3050 if (pscr & SERIAL_PORT_ENABLE) { 3051 pscr &= ~SERIAL_PORT_ENABLE; 3052 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 3053 } 3054 3055 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 3056 if (!dev->phydev) { 3057 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 3058 if (speed == SPEED_1000) 3059 pscr |= SET_GMII_SPEED_TO_1000; 3060 else if (speed == SPEED_100) 3061 pscr |= SET_MII_SPEED_TO_100; 3062 3063 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 3064 3065 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 3066 if (duplex == DUPLEX_FULL) 3067 pscr |= SET_FULL_DUPLEX_MODE; 3068 } 3069 3070 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 3071 } 3072 3073 static const struct net_device_ops mv643xx_eth_netdev_ops = { 3074 .ndo_open = mv643xx_eth_open, 3075 .ndo_stop = mv643xx_eth_stop, 3076 .ndo_start_xmit = mv643xx_eth_xmit, 3077 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 3078 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 3079 .ndo_validate_addr = eth_validate_addr, 3080 .ndo_do_ioctl = mv643xx_eth_ioctl, 3081 .ndo_change_mtu = mv643xx_eth_change_mtu, 3082 .ndo_set_features = mv643xx_eth_set_features, 3083 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 3084 .ndo_get_stats = mv643xx_eth_get_stats, 3085 #ifdef CONFIG_NET_POLL_CONTROLLER 3086 .ndo_poll_controller = mv643xx_eth_netpoll, 3087 #endif 3088 }; 3089 3090 static int mv643xx_eth_probe(struct platform_device *pdev) 3091 { 3092 struct mv643xx_eth_platform_data *pd; 3093 struct mv643xx_eth_private *mp; 3094 struct net_device *dev; 3095 struct phy_device *phydev = NULL; 3096 struct resource *res; 3097 int err; 3098 3099 pd = dev_get_platdata(&pdev->dev); 3100 if (pd == NULL) { 3101 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); 3102 return -ENODEV; 3103 } 3104 3105 if (pd->shared == NULL) { 3106 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); 3107 return -ENODEV; 3108 } 3109 3110 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 3111 if (!dev) 3112 return -ENOMEM; 3113 3114 SET_NETDEV_DEV(dev, &pdev->dev); 3115 mp = netdev_priv(dev); 3116 platform_set_drvdata(pdev, mp); 3117 3118 mp->shared = platform_get_drvdata(pd->shared); 3119 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 3120 mp->port_num = pd->port_number; 3121 3122 mp->dev = dev; 3123 3124 /* Kirkwood resets some registers on gated clocks. Especially 3125 * CLK125_BYPASS_EN must be cleared but is not available on 3126 * all other SoCs/System Controllers using this driver. 3127 */ 3128 if (of_device_is_compatible(pdev->dev.of_node, 3129 "marvell,kirkwood-eth-port")) 3130 wrlp(mp, PORT_SERIAL_CONTROL1, 3131 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); 3132 3133 /* 3134 * Start with a default rate, and if there is a clock, allow 3135 * it to override the default. 3136 */ 3137 mp->t_clk = 133000000; 3138 mp->clk = devm_clk_get(&pdev->dev, NULL); 3139 if (!IS_ERR(mp->clk)) { 3140 clk_prepare_enable(mp->clk); 3141 mp->t_clk = clk_get_rate(mp->clk); 3142 } else if (!IS_ERR(mp->shared->clk)) { 3143 mp->t_clk = clk_get_rate(mp->shared->clk); 3144 } 3145 3146 set_params(mp, pd); 3147 netif_set_real_num_tx_queues(dev, mp->txq_count); 3148 netif_set_real_num_rx_queues(dev, mp->rxq_count); 3149 3150 err = 0; 3151 if (pd->phy_node) { 3152 phydev = of_phy_connect(mp->dev, pd->phy_node, 3153 mv643xx_eth_adjust_link, 0, 3154 get_phy_mode(mp)); 3155 if (!phydev) 3156 err = -ENODEV; 3157 else 3158 phy_addr_set(mp, phydev->mdio.addr); 3159 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { 3160 phydev = phy_scan(mp, pd->phy_addr); 3161 3162 if (IS_ERR(phydev)) 3163 err = PTR_ERR(phydev); 3164 else 3165 phy_init(mp, pd->speed, pd->duplex); 3166 } 3167 if (err == -ENODEV) { 3168 err = -EPROBE_DEFER; 3169 goto out; 3170 } 3171 if (err) 3172 goto out; 3173 3174 dev->ethtool_ops = &mv643xx_eth_ethtool_ops; 3175 3176 init_pscr(mp, pd->speed, pd->duplex); 3177 3178 3179 mib_counters_clear(mp); 3180 3181 timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0); 3182 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 3183 3184 spin_lock_init(&mp->mib_counters_lock); 3185 3186 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 3187 3188 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); 3189 3190 timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); 3191 3192 3193 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 3194 BUG_ON(!res); 3195 dev->irq = res->start; 3196 3197 dev->netdev_ops = &mv643xx_eth_netdev_ops; 3198 3199 dev->watchdog_timeo = 2 * HZ; 3200 dev->base_addr = 0; 3201 3202 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 3203 dev->vlan_features = dev->features; 3204 3205 dev->features |= NETIF_F_RXCSUM; 3206 dev->hw_features = dev->features; 3207 3208 dev->priv_flags |= IFF_UNICAST_FLT; 3209 dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; 3210 3211 /* MTU range: 64 - 9500 */ 3212 dev->min_mtu = 64; 3213 dev->max_mtu = 9500; 3214 3215 if (mp->shared->win_protect) 3216 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 3217 3218 netif_carrier_off(dev); 3219 3220 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); 3221 3222 set_rx_coal(mp, 250); 3223 set_tx_coal(mp, 0); 3224 3225 err = register_netdev(dev); 3226 if (err) 3227 goto out; 3228 3229 netdev_notice(dev, "port %d with MAC address %pM\n", 3230 mp->port_num, dev->dev_addr); 3231 3232 if (mp->tx_desc_sram_size > 0) 3233 netdev_notice(dev, "configured with sram\n"); 3234 3235 return 0; 3236 3237 out: 3238 if (!IS_ERR(mp->clk)) 3239 clk_disable_unprepare(mp->clk); 3240 free_netdev(dev); 3241 3242 return err; 3243 } 3244 3245 static int mv643xx_eth_remove(struct platform_device *pdev) 3246 { 3247 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 3248 struct net_device *dev = mp->dev; 3249 3250 unregister_netdev(mp->dev); 3251 if (dev->phydev) 3252 phy_disconnect(dev->phydev); 3253 cancel_work_sync(&mp->tx_timeout_task); 3254 3255 if (!IS_ERR(mp->clk)) 3256 clk_disable_unprepare(mp->clk); 3257 3258 free_netdev(mp->dev); 3259 3260 return 0; 3261 } 3262 3263 static void mv643xx_eth_shutdown(struct platform_device *pdev) 3264 { 3265 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 3266 3267 /* Mask all interrupts on ethernet port */ 3268 wrlp(mp, INT_MASK, 0); 3269 rdlp(mp, INT_MASK); 3270 3271 if (netif_running(mp->dev)) 3272 port_reset(mp); 3273 } 3274 3275 static struct platform_driver mv643xx_eth_driver = { 3276 .probe = mv643xx_eth_probe, 3277 .remove = mv643xx_eth_remove, 3278 .shutdown = mv643xx_eth_shutdown, 3279 .driver = { 3280 .name = MV643XX_ETH_NAME, 3281 }, 3282 }; 3283 3284 static struct platform_driver * const drivers[] = { 3285 &mv643xx_eth_shared_driver, 3286 &mv643xx_eth_driver, 3287 }; 3288 3289 static int __init mv643xx_eth_init_module(void) 3290 { 3291 return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 3292 } 3293 module_init(mv643xx_eth_init_module); 3294 3295 static void __exit mv643xx_eth_cleanup_module(void) 3296 { 3297 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 3298 } 3299 module_exit(mv643xx_eth_cleanup_module); 3300 3301 MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 3302 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 3303 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 3304 MODULE_LICENSE("GPL"); 3305 MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 3306 MODULE_ALIAS("platform:" MV643XX_ETH_NAME); 3307