/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define SMI_BUSY			0x10000000
#define SMI_READ_VALID			0x08000000
#define SMI_OPCODE_READ			0x04000000
#define SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
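 *
 * These offsets are relative to a port's own register block; the
 * rdlp()/wrlp() accessors further down add the per-port base.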
 */
#define PORT_CONFIG			0x0000
#define UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define TX_BURST_SIZE_16_64BIT		0x01000000
#define TX_BURST_SIZE_4_64BIT		0x00800000
#define BLM_TX_NO_SWAP			0x00000020
#define BLM_RX_NO_SWAP			0x00000010
#define RX_BURST_SIZE_16_64BIT		0x00000008
#define RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define SET_MII_SPEED_TO_100		0x01000000
#define SET_GMII_SPEED_TO_1000		0x00800000
#define SET_FULL_DUPLEX_MODE		0x00200000
#define MAX_RX_PACKET_9700BYTE		0x000a0000
#define DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define DO_NOT_FORCE_LINK_FAIL		0x00000400
#define SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define FORCE_LINK_PASS			0x00000002
#define SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define TX_FIFO_EMPTY			0x00000400
#define TX_IN_PROGRESS			0x00000080
#define PORT_SPEED_MASK			0x00000030
#define PORT_SPEED_1000			0x00000010
#define PORT_SPEED_100			0x00000020
#define PORT_SPEED_10			0x00000000
#define FLOW_CONTROL_ENABLED		0x00000008
#define FULL_DUPLEX			0x00000004
#define LINK_UP				0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define INT_TX_END			0x07f80000
#define INT_TX_END_0			0x00080000
#define INT_RX				0x000003fc
#define INT_RX_0			0x00000004
#define INT_EXT				0x00000002
#define INT_CAUSE_EXT			0x0064
#define INT_EXT_LINK_PHY		0x00110000
#define INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
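 *
 * The two variants below mirror each other within each 64-bit word;
 * together with the BLM_RX_NO_SWAP/BLM_TX_NO_SWAP choices made above,
 * this keeps the in-memory descriptor format seen by the SDMA engine
 * the same on big- and little-endian hosts.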
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct lro_counters {
	u32 lro_aggregated;
	u32 lro_flushed;
	u32 lro_no_desc;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[8];
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct lro_counters lro_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}
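
/*
 * As with RXQ_COMMAND above, writing 1 << index to the low byte of
 * TXQ_COMMAND enables a queue, and writing it to bits 8..15 requests a
 * disable; txq_disable() below then polls the enable bit until the
 * hardware clears it.
 */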
static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
		       u64 *hdr_flags, void *priv)
{
	unsigned long cmd_sts = (unsigned long)priv;

	/*
	 * Make sure that this packet is Ethernet II, is not VLAN
	 * tagged, is IPv4, has a valid IP header, and is TCP.
	 */
	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
			RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
			RX_PKT_IS_VLAN_TAGGED)) !=
	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
		return -1;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int lro_flush_needed;
	int rx;

	lro_flush_needed = 0;
	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		if (skb->dev->features & NETIF_F_LRO &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
			lro_flush_needed = 1;
		} else
			netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (lro_flush_needed)
		lro_flush_all(&rxq->lro_mgr);

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

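		/*
		 * Publish the buffer to the hardware: buf_ptr and buf_size
		 * are filled in first, and only then is BUFFER_OWNED_BY_DMA
		 * set; the wmb() in between keeps that ordering.
		 */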
		rx_desc = rxq->rx_desc_area + rx;

		size = skb->end - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
					     this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;
		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
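		/* (IHL is in 32-bit words, so 5 means a 20-byte header) */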
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	length = skb->len;

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += length;
		txq->tx_packets++;

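		/*
		 * Stop the queue if there may no longer be room for a
		 * worst-case skb (MAX_SKB_FRAGS fragments plus the head
		 * descriptor); txq_maybe_wake() can restart it once
		 * enough descriptors have been reclaimed.
		 */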
		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
error\n"); 967*527a6266SJeff Kirsher mp->dev->stats.tx_errors++; 968*527a6266SJeff Kirsher } 969*527a6266SJeff Kirsher 970*527a6266SJeff Kirsher if (cmd_sts & TX_FIRST_DESC) { 971*527a6266SJeff Kirsher dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 972*527a6266SJeff Kirsher desc->byte_cnt, DMA_TO_DEVICE); 973*527a6266SJeff Kirsher } else { 974*527a6266SJeff Kirsher dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, 975*527a6266SJeff Kirsher desc->byte_cnt, DMA_TO_DEVICE); 976*527a6266SJeff Kirsher } 977*527a6266SJeff Kirsher 978*527a6266SJeff Kirsher if (skb != NULL) { 979*527a6266SJeff Kirsher if (skb_queue_len(&mp->rx_recycle) < 980*527a6266SJeff Kirsher mp->rx_ring_size && 981*527a6266SJeff Kirsher skb_recycle_check(skb, mp->skb_size)) 982*527a6266SJeff Kirsher __skb_queue_head(&mp->rx_recycle, skb); 983*527a6266SJeff Kirsher else 984*527a6266SJeff Kirsher dev_kfree_skb(skb); 985*527a6266SJeff Kirsher } 986*527a6266SJeff Kirsher } 987*527a6266SJeff Kirsher 988*527a6266SJeff Kirsher __netif_tx_unlock(nq); 989*527a6266SJeff Kirsher 990*527a6266SJeff Kirsher if (reclaimed < budget) 991*527a6266SJeff Kirsher mp->work_tx &= ~(1 << txq->index); 992*527a6266SJeff Kirsher 993*527a6266SJeff Kirsher return reclaimed; 994*527a6266SJeff Kirsher } 995*527a6266SJeff Kirsher 996*527a6266SJeff Kirsher 997*527a6266SJeff Kirsher /* tx rate control **********************************************************/ 998*527a6266SJeff Kirsher /* 999*527a6266SJeff Kirsher * Set total maximum TX rate (shared by all TX queues for this port) 1000*527a6266SJeff Kirsher * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 1001*527a6266SJeff Kirsher */ 1002*527a6266SJeff Kirsher static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) 1003*527a6266SJeff Kirsher { 1004*527a6266SJeff Kirsher int token_rate; 1005*527a6266SJeff Kirsher int mtu; 1006*527a6266SJeff Kirsher int bucket_size; 1007*527a6266SJeff Kirsher 1008*527a6266SJeff Kirsher token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1009*527a6266SJeff Kirsher if (token_rate > 1023) 1010*527a6266SJeff Kirsher token_rate = 1023; 1011*527a6266SJeff Kirsher 1012*527a6266SJeff Kirsher mtu = (mp->dev->mtu + 255) >> 8; 1013*527a6266SJeff Kirsher if (mtu > 63) 1014*527a6266SJeff Kirsher mtu = 63; 1015*527a6266SJeff Kirsher 1016*527a6266SJeff Kirsher bucket_size = (burst + 255) >> 8; 1017*527a6266SJeff Kirsher if (bucket_size > 65535) 1018*527a6266SJeff Kirsher bucket_size = 65535; 1019*527a6266SJeff Kirsher 1020*527a6266SJeff Kirsher switch (mp->shared->tx_bw_control) { 1021*527a6266SJeff Kirsher case TX_BW_CONTROL_OLD_LAYOUT: 1022*527a6266SJeff Kirsher wrlp(mp, TX_BW_RATE, token_rate); 1023*527a6266SJeff Kirsher wrlp(mp, TX_BW_MTU, mtu); 1024*527a6266SJeff Kirsher wrlp(mp, TX_BW_BURST, bucket_size); 1025*527a6266SJeff Kirsher break; 1026*527a6266SJeff Kirsher case TX_BW_CONTROL_NEW_LAYOUT: 1027*527a6266SJeff Kirsher wrlp(mp, TX_BW_RATE_MOVED, token_rate); 1028*527a6266SJeff Kirsher wrlp(mp, TX_BW_MTU_MOVED, mtu); 1029*527a6266SJeff Kirsher wrlp(mp, TX_BW_BURST_MOVED, bucket_size); 1030*527a6266SJeff Kirsher break; 1031*527a6266SJeff Kirsher } 1032*527a6266SJeff Kirsher } 1033*527a6266SJeff Kirsher 1034*527a6266SJeff Kirsher static void txq_set_rate(struct tx_queue *txq, int rate, int burst) 1035*527a6266SJeff Kirsher { 1036*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = txq_to_mp(txq); 1037*527a6266SJeff Kirsher int token_rate; 1038*527a6266SJeff Kirsher int bucket_size; 1039*527a6266SJeff Kirsher 
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		pr_warn("SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

Kirsher 1189*527a6266SJeff Kirsher stats->tx_packets = tx_packets; 1190*527a6266SJeff Kirsher stats->tx_bytes = tx_bytes; 1191*527a6266SJeff Kirsher stats->tx_dropped = tx_dropped; 1192*527a6266SJeff Kirsher 1193*527a6266SJeff Kirsher return stats; 1194*527a6266SJeff Kirsher } 1195*527a6266SJeff Kirsher 1196*527a6266SJeff Kirsher static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) 1197*527a6266SJeff Kirsher { 1198*527a6266SJeff Kirsher u32 lro_aggregated = 0; 1199*527a6266SJeff Kirsher u32 lro_flushed = 0; 1200*527a6266SJeff Kirsher u32 lro_no_desc = 0; 1201*527a6266SJeff Kirsher int i; 1202*527a6266SJeff Kirsher 1203*527a6266SJeff Kirsher for (i = 0; i < mp->rxq_count; i++) { 1204*527a6266SJeff Kirsher struct rx_queue *rxq = mp->rxq + i; 1205*527a6266SJeff Kirsher 1206*527a6266SJeff Kirsher lro_aggregated += rxq->lro_mgr.stats.aggregated; 1207*527a6266SJeff Kirsher lro_flushed += rxq->lro_mgr.stats.flushed; 1208*527a6266SJeff Kirsher lro_no_desc += rxq->lro_mgr.stats.no_desc; 1209*527a6266SJeff Kirsher } 1210*527a6266SJeff Kirsher 1211*527a6266SJeff Kirsher mp->lro_counters.lro_aggregated = lro_aggregated; 1212*527a6266SJeff Kirsher mp->lro_counters.lro_flushed = lro_flushed; 1213*527a6266SJeff Kirsher mp->lro_counters.lro_no_desc = lro_no_desc; 1214*527a6266SJeff Kirsher } 1215*527a6266SJeff Kirsher 1216*527a6266SJeff Kirsher static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1217*527a6266SJeff Kirsher { 1218*527a6266SJeff Kirsher return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1219*527a6266SJeff Kirsher } 1220*527a6266SJeff Kirsher 1221*527a6266SJeff Kirsher static void mib_counters_clear(struct mv643xx_eth_private *mp) 1222*527a6266SJeff Kirsher { 1223*527a6266SJeff Kirsher int i; 1224*527a6266SJeff Kirsher 1225*527a6266SJeff Kirsher for (i = 0; i < 0x80; i += 4) 1226*527a6266SJeff Kirsher mib_read(mp, i); 1227*527a6266SJeff Kirsher } 1228*527a6266SJeff Kirsher 1229*527a6266SJeff Kirsher static void mib_counters_update(struct mv643xx_eth_private *mp) 1230*527a6266SJeff Kirsher { 1231*527a6266SJeff Kirsher struct mib_counters *p = &mp->mib_counters; 1232*527a6266SJeff Kirsher 1233*527a6266SJeff Kirsher spin_lock_bh(&mp->mib_counters_lock); 1234*527a6266SJeff Kirsher p->good_octets_received += mib_read(mp, 0x00); 1235*527a6266SJeff Kirsher p->bad_octets_received += mib_read(mp, 0x08); 1236*527a6266SJeff Kirsher p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1237*527a6266SJeff Kirsher p->good_frames_received += mib_read(mp, 0x10); 1238*527a6266SJeff Kirsher p->bad_frames_received += mib_read(mp, 0x14); 1239*527a6266SJeff Kirsher p->broadcast_frames_received += mib_read(mp, 0x18); 1240*527a6266SJeff Kirsher p->multicast_frames_received += mib_read(mp, 0x1c); 1241*527a6266SJeff Kirsher p->frames_64_octets += mib_read(mp, 0x20); 1242*527a6266SJeff Kirsher p->frames_65_to_127_octets += mib_read(mp, 0x24); 1243*527a6266SJeff Kirsher p->frames_128_to_255_octets += mib_read(mp, 0x28); 1244*527a6266SJeff Kirsher p->frames_256_to_511_octets += mib_read(mp, 0x2c); 1245*527a6266SJeff Kirsher p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1246*527a6266SJeff Kirsher p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1247*527a6266SJeff Kirsher p->good_octets_sent += mib_read(mp, 0x38); 1248*527a6266SJeff Kirsher p->good_frames_sent += mib_read(mp, 0x40); 1249*527a6266SJeff Kirsher p->excessive_collision += mib_read(mp, 0x44); 1250*527a6266SJeff Kirsher p->multicast_frames_sent += mib_read(mp, 0x48); 1251*527a6266SJeff Kirsher 
p->broadcast_frames_sent += mib_read(mp, 0x4c); 1252*527a6266SJeff Kirsher p->unrec_mac_control_received += mib_read(mp, 0x50); 1253*527a6266SJeff Kirsher p->fc_sent += mib_read(mp, 0x54); 1254*527a6266SJeff Kirsher p->good_fc_received += mib_read(mp, 0x58); 1255*527a6266SJeff Kirsher p->bad_fc_received += mib_read(mp, 0x5c); 1256*527a6266SJeff Kirsher p->undersize_received += mib_read(mp, 0x60); 1257*527a6266SJeff Kirsher p->fragments_received += mib_read(mp, 0x64); 1258*527a6266SJeff Kirsher p->oversize_received += mib_read(mp, 0x68); 1259*527a6266SJeff Kirsher p->jabber_received += mib_read(mp, 0x6c); 1260*527a6266SJeff Kirsher p->mac_receive_error += mib_read(mp, 0x70); 1261*527a6266SJeff Kirsher p->bad_crc_event += mib_read(mp, 0x74); 1262*527a6266SJeff Kirsher p->collision += mib_read(mp, 0x78); 1263*527a6266SJeff Kirsher p->late_collision += mib_read(mp, 0x7c); 1264*527a6266SJeff Kirsher spin_unlock_bh(&mp->mib_counters_lock); 1265*527a6266SJeff Kirsher 1266*527a6266SJeff Kirsher mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1267*527a6266SJeff Kirsher } 1268*527a6266SJeff Kirsher 1269*527a6266SJeff Kirsher static void mib_counters_timer_wrapper(unsigned long _mp) 1270*527a6266SJeff Kirsher { 1271*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = (void *)_mp; 1272*527a6266SJeff Kirsher 1273*527a6266SJeff Kirsher mib_counters_update(mp); 1274*527a6266SJeff Kirsher } 1275*527a6266SJeff Kirsher 1276*527a6266SJeff Kirsher 1277*527a6266SJeff Kirsher /* interrupt coalescing *****************************************************/ 1278*527a6266SJeff Kirsher /* 1279*527a6266SJeff Kirsher * Hardware coalescing parameters are set in units of 64 t_clk 1280*527a6266SJeff Kirsher * cycles. I.e.: 1281*527a6266SJeff Kirsher * 1282*527a6266SJeff Kirsher * coal_delay_in_usec = 64000000 * register_value / t_clk_rate 1283*527a6266SJeff Kirsher * 1284*527a6266SJeff Kirsher * register_value = coal_delay_in_usec * t_clk_rate / 64000000 1285*527a6266SJeff Kirsher * 1286*527a6266SJeff Kirsher * In the ->set*() methods, we round the computed register value 1287*527a6266SJeff Kirsher * to the nearest integer. 
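 *
 * A rough worked example (purely illustrative, assuming a 166 MHz
 * t_clk): requesting 100 usec of coalescing gives
 * register_value = 100 * 166000000 / 64000000 ~= 259, and reading
 * that value back yields 259 * 64000000 / 166000000 ~= 99 usec.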
1288*527a6266SJeff Kirsher */ 1289*527a6266SJeff Kirsher static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) 1290*527a6266SJeff Kirsher { 1291*527a6266SJeff Kirsher u32 val = rdlp(mp, SDMA_CONFIG); 1292*527a6266SJeff Kirsher u64 temp; 1293*527a6266SJeff Kirsher 1294*527a6266SJeff Kirsher if (mp->shared->extended_rx_coal_limit) 1295*527a6266SJeff Kirsher temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); 1296*527a6266SJeff Kirsher else 1297*527a6266SJeff Kirsher temp = (val & 0x003fff00) >> 8; 1298*527a6266SJeff Kirsher 1299*527a6266SJeff Kirsher temp *= 64000000; 1300*527a6266SJeff Kirsher do_div(temp, mp->shared->t_clk); 1301*527a6266SJeff Kirsher 1302*527a6266SJeff Kirsher return (unsigned int)temp; 1303*527a6266SJeff Kirsher } 1304*527a6266SJeff Kirsher 1305*527a6266SJeff Kirsher static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1306*527a6266SJeff Kirsher { 1307*527a6266SJeff Kirsher u64 temp; 1308*527a6266SJeff Kirsher u32 val; 1309*527a6266SJeff Kirsher 1310*527a6266SJeff Kirsher temp = (u64)usec * mp->shared->t_clk; 1311*527a6266SJeff Kirsher temp += 31999999; 1312*527a6266SJeff Kirsher do_div(temp, 64000000); 1313*527a6266SJeff Kirsher 1314*527a6266SJeff Kirsher val = rdlp(mp, SDMA_CONFIG); 1315*527a6266SJeff Kirsher if (mp->shared->extended_rx_coal_limit) { 1316*527a6266SJeff Kirsher if (temp > 0xffff) 1317*527a6266SJeff Kirsher temp = 0xffff; 1318*527a6266SJeff Kirsher val &= ~0x023fff80; 1319*527a6266SJeff Kirsher val |= (temp & 0x8000) << 10; 1320*527a6266SJeff Kirsher val |= (temp & 0x7fff) << 7; 1321*527a6266SJeff Kirsher } else { 1322*527a6266SJeff Kirsher if (temp > 0x3fff) 1323*527a6266SJeff Kirsher temp = 0x3fff; 1324*527a6266SJeff Kirsher val &= ~0x003fff00; 1325*527a6266SJeff Kirsher val |= (temp & 0x3fff) << 8; 1326*527a6266SJeff Kirsher } 1327*527a6266SJeff Kirsher wrlp(mp, SDMA_CONFIG, val); 1328*527a6266SJeff Kirsher } 1329*527a6266SJeff Kirsher 1330*527a6266SJeff Kirsher static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) 1331*527a6266SJeff Kirsher { 1332*527a6266SJeff Kirsher u64 temp; 1333*527a6266SJeff Kirsher 1334*527a6266SJeff Kirsher temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1335*527a6266SJeff Kirsher temp *= 64000000; 1336*527a6266SJeff Kirsher do_div(temp, mp->shared->t_clk); 1337*527a6266SJeff Kirsher 1338*527a6266SJeff Kirsher return (unsigned int)temp; 1339*527a6266SJeff Kirsher } 1340*527a6266SJeff Kirsher 1341*527a6266SJeff Kirsher static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1342*527a6266SJeff Kirsher { 1343*527a6266SJeff Kirsher u64 temp; 1344*527a6266SJeff Kirsher 1345*527a6266SJeff Kirsher temp = (u64)usec * mp->shared->t_clk; 1346*527a6266SJeff Kirsher temp += 31999999; 1347*527a6266SJeff Kirsher do_div(temp, 64000000); 1348*527a6266SJeff Kirsher 1349*527a6266SJeff Kirsher if (temp > 0x3fff) 1350*527a6266SJeff Kirsher temp = 0x3fff; 1351*527a6266SJeff Kirsher 1352*527a6266SJeff Kirsher wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); 1353*527a6266SJeff Kirsher } 1354*527a6266SJeff Kirsher 1355*527a6266SJeff Kirsher 1356*527a6266SJeff Kirsher /* ethtool ******************************************************************/ 1357*527a6266SJeff Kirsher struct mv643xx_eth_stats { 1358*527a6266SJeff Kirsher char stat_string[ETH_GSTRING_LEN]; 1359*527a6266SJeff Kirsher int sizeof_stat; 1360*527a6266SJeff Kirsher int netdev_off; 1361*527a6266SJeff Kirsher int mp_off; 1362*527a6266SJeff Kirsher }; 1363*527a6266SJeff Kirsher 1364*527a6266SJeff Kirsher 
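/*
 * Each entry in the table below describes one ethtool statistic:
 * its name, its size, and the offset of the backing field in either
 * struct net_device (netdev_off) or struct mv643xx_eth_private
 * (mp_off), with -1 marking whichever of the two does not apply.
 * mv643xx_eth_get_ethtool_stats() uses these offsets to pick the
 * right base pointer when copying the values out.
 */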
#define SSTAT(m) \ 1365*527a6266SJeff Kirsher { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1366*527a6266SJeff Kirsher offsetof(struct net_device, stats.m), -1 } 1367*527a6266SJeff Kirsher 1368*527a6266SJeff Kirsher #define MIBSTAT(m) \ 1369*527a6266SJeff Kirsher { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1370*527a6266SJeff Kirsher -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1371*527a6266SJeff Kirsher 1372*527a6266SJeff Kirsher #define LROSTAT(m) \ 1373*527a6266SJeff Kirsher { #m, FIELD_SIZEOF(struct lro_counters, m), \ 1374*527a6266SJeff Kirsher -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } 1375*527a6266SJeff Kirsher 1376*527a6266SJeff Kirsher static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1377*527a6266SJeff Kirsher SSTAT(rx_packets), 1378*527a6266SJeff Kirsher SSTAT(tx_packets), 1379*527a6266SJeff Kirsher SSTAT(rx_bytes), 1380*527a6266SJeff Kirsher SSTAT(tx_bytes), 1381*527a6266SJeff Kirsher SSTAT(rx_errors), 1382*527a6266SJeff Kirsher SSTAT(tx_errors), 1383*527a6266SJeff Kirsher SSTAT(rx_dropped), 1384*527a6266SJeff Kirsher SSTAT(tx_dropped), 1385*527a6266SJeff Kirsher MIBSTAT(good_octets_received), 1386*527a6266SJeff Kirsher MIBSTAT(bad_octets_received), 1387*527a6266SJeff Kirsher MIBSTAT(internal_mac_transmit_err), 1388*527a6266SJeff Kirsher MIBSTAT(good_frames_received), 1389*527a6266SJeff Kirsher MIBSTAT(bad_frames_received), 1390*527a6266SJeff Kirsher MIBSTAT(broadcast_frames_received), 1391*527a6266SJeff Kirsher MIBSTAT(multicast_frames_received), 1392*527a6266SJeff Kirsher MIBSTAT(frames_64_octets), 1393*527a6266SJeff Kirsher MIBSTAT(frames_65_to_127_octets), 1394*527a6266SJeff Kirsher MIBSTAT(frames_128_to_255_octets), 1395*527a6266SJeff Kirsher MIBSTAT(frames_256_to_511_octets), 1396*527a6266SJeff Kirsher MIBSTAT(frames_512_to_1023_octets), 1397*527a6266SJeff Kirsher MIBSTAT(frames_1024_to_max_octets), 1398*527a6266SJeff Kirsher MIBSTAT(good_octets_sent), 1399*527a6266SJeff Kirsher MIBSTAT(good_frames_sent), 1400*527a6266SJeff Kirsher MIBSTAT(excessive_collision), 1401*527a6266SJeff Kirsher MIBSTAT(multicast_frames_sent), 1402*527a6266SJeff Kirsher MIBSTAT(broadcast_frames_sent), 1403*527a6266SJeff Kirsher MIBSTAT(unrec_mac_control_received), 1404*527a6266SJeff Kirsher MIBSTAT(fc_sent), 1405*527a6266SJeff Kirsher MIBSTAT(good_fc_received), 1406*527a6266SJeff Kirsher MIBSTAT(bad_fc_received), 1407*527a6266SJeff Kirsher MIBSTAT(undersize_received), 1408*527a6266SJeff Kirsher MIBSTAT(fragments_received), 1409*527a6266SJeff Kirsher MIBSTAT(oversize_received), 1410*527a6266SJeff Kirsher MIBSTAT(jabber_received), 1411*527a6266SJeff Kirsher MIBSTAT(mac_receive_error), 1412*527a6266SJeff Kirsher MIBSTAT(bad_crc_event), 1413*527a6266SJeff Kirsher MIBSTAT(collision), 1414*527a6266SJeff Kirsher MIBSTAT(late_collision), 1415*527a6266SJeff Kirsher LROSTAT(lro_aggregated), 1416*527a6266SJeff Kirsher LROSTAT(lro_flushed), 1417*527a6266SJeff Kirsher LROSTAT(lro_no_desc), 1418*527a6266SJeff Kirsher }; 1419*527a6266SJeff Kirsher 1420*527a6266SJeff Kirsher static int 1421*527a6266SJeff Kirsher mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, 1422*527a6266SJeff Kirsher struct ethtool_cmd *cmd) 1423*527a6266SJeff Kirsher { 1424*527a6266SJeff Kirsher int err; 1425*527a6266SJeff Kirsher 1426*527a6266SJeff Kirsher err = phy_read_status(mp->phy); 1427*527a6266SJeff Kirsher if (err == 0) 1428*527a6266SJeff Kirsher err = phy_ethtool_gset(mp->phy, cmd); 1429*527a6266SJeff Kirsher 1430*527a6266SJeff Kirsher /* 1431*527a6266SJeff Kirsher * The 
MAC does not support 1000baseT_Half. 1432*527a6266SJeff Kirsher */ 1433*527a6266SJeff Kirsher cmd->supported &= ~SUPPORTED_1000baseT_Half; 1434*527a6266SJeff Kirsher cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1435*527a6266SJeff Kirsher 1436*527a6266SJeff Kirsher return err; 1437*527a6266SJeff Kirsher } 1438*527a6266SJeff Kirsher 1439*527a6266SJeff Kirsher static int 1440*527a6266SJeff Kirsher mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, 1441*527a6266SJeff Kirsher struct ethtool_cmd *cmd) 1442*527a6266SJeff Kirsher { 1443*527a6266SJeff Kirsher u32 port_status; 1444*527a6266SJeff Kirsher 1445*527a6266SJeff Kirsher port_status = rdlp(mp, PORT_STATUS); 1446*527a6266SJeff Kirsher 1447*527a6266SJeff Kirsher cmd->supported = SUPPORTED_MII; 1448*527a6266SJeff Kirsher cmd->advertising = ADVERTISED_MII; 1449*527a6266SJeff Kirsher switch (port_status & PORT_SPEED_MASK) { 1450*527a6266SJeff Kirsher case PORT_SPEED_10: 1451*527a6266SJeff Kirsher ethtool_cmd_speed_set(cmd, SPEED_10); 1452*527a6266SJeff Kirsher break; 1453*527a6266SJeff Kirsher case PORT_SPEED_100: 1454*527a6266SJeff Kirsher ethtool_cmd_speed_set(cmd, SPEED_100); 1455*527a6266SJeff Kirsher break; 1456*527a6266SJeff Kirsher case PORT_SPEED_1000: 1457*527a6266SJeff Kirsher ethtool_cmd_speed_set(cmd, SPEED_1000); 1458*527a6266SJeff Kirsher break; 1459*527a6266SJeff Kirsher default: 1460*527a6266SJeff Kirsher cmd->speed = -1; 1461*527a6266SJeff Kirsher break; 1462*527a6266SJeff Kirsher } 1463*527a6266SJeff Kirsher cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; 1464*527a6266SJeff Kirsher cmd->port = PORT_MII; 1465*527a6266SJeff Kirsher cmd->phy_address = 0; 1466*527a6266SJeff Kirsher cmd->transceiver = XCVR_INTERNAL; 1467*527a6266SJeff Kirsher cmd->autoneg = AUTONEG_DISABLE; 1468*527a6266SJeff Kirsher cmd->maxtxpkt = 1; 1469*527a6266SJeff Kirsher cmd->maxrxpkt = 1; 1470*527a6266SJeff Kirsher 1471*527a6266SJeff Kirsher return 0; 1472*527a6266SJeff Kirsher } 1473*527a6266SJeff Kirsher 1474*527a6266SJeff Kirsher static int 1475*527a6266SJeff Kirsher mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1476*527a6266SJeff Kirsher { 1477*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1478*527a6266SJeff Kirsher 1479*527a6266SJeff Kirsher if (mp->phy != NULL) 1480*527a6266SJeff Kirsher return mv643xx_eth_get_settings_phy(mp, cmd); 1481*527a6266SJeff Kirsher else 1482*527a6266SJeff Kirsher return mv643xx_eth_get_settings_phyless(mp, cmd); 1483*527a6266SJeff Kirsher } 1484*527a6266SJeff Kirsher 1485*527a6266SJeff Kirsher static int 1486*527a6266SJeff Kirsher mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1487*527a6266SJeff Kirsher { 1488*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1489*527a6266SJeff Kirsher 1490*527a6266SJeff Kirsher if (mp->phy == NULL) 1491*527a6266SJeff Kirsher return -EINVAL; 1492*527a6266SJeff Kirsher 1493*527a6266SJeff Kirsher /* 1494*527a6266SJeff Kirsher * The MAC does not support 1000baseT_Half. 
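 * Strip it from the advertised modes here so that it is never
 * handed down to the PHY via phy_ethtool_sset() below.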
1495*527a6266SJeff Kirsher */ 1496*527a6266SJeff Kirsher cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1497*527a6266SJeff Kirsher 1498*527a6266SJeff Kirsher return phy_ethtool_sset(mp->phy, cmd); 1499*527a6266SJeff Kirsher } 1500*527a6266SJeff Kirsher 1501*527a6266SJeff Kirsher static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1502*527a6266SJeff Kirsher struct ethtool_drvinfo *drvinfo) 1503*527a6266SJeff Kirsher { 1504*527a6266SJeff Kirsher strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); 1505*527a6266SJeff Kirsher strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); 1506*527a6266SJeff Kirsher strncpy(drvinfo->fw_version, "N/A", 32); 1507*527a6266SJeff Kirsher strncpy(drvinfo->bus_info, "platform", 32); 1508*527a6266SJeff Kirsher drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); 1509*527a6266SJeff Kirsher } 1510*527a6266SJeff Kirsher 1511*527a6266SJeff Kirsher static int mv643xx_eth_nway_reset(struct net_device *dev) 1512*527a6266SJeff Kirsher { 1513*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1514*527a6266SJeff Kirsher 1515*527a6266SJeff Kirsher if (mp->phy == NULL) 1516*527a6266SJeff Kirsher return -EINVAL; 1517*527a6266SJeff Kirsher 1518*527a6266SJeff Kirsher return genphy_restart_aneg(mp->phy); 1519*527a6266SJeff Kirsher } 1520*527a6266SJeff Kirsher 1521*527a6266SJeff Kirsher static int 1522*527a6266SJeff Kirsher mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1523*527a6266SJeff Kirsher { 1524*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1525*527a6266SJeff Kirsher 1526*527a6266SJeff Kirsher ec->rx_coalesce_usecs = get_rx_coal(mp); 1527*527a6266SJeff Kirsher ec->tx_coalesce_usecs = get_tx_coal(mp); 1528*527a6266SJeff Kirsher 1529*527a6266SJeff Kirsher return 0; 1530*527a6266SJeff Kirsher } 1531*527a6266SJeff Kirsher 1532*527a6266SJeff Kirsher static int 1533*527a6266SJeff Kirsher mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1534*527a6266SJeff Kirsher { 1535*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1536*527a6266SJeff Kirsher 1537*527a6266SJeff Kirsher set_rx_coal(mp, ec->rx_coalesce_usecs); 1538*527a6266SJeff Kirsher set_tx_coal(mp, ec->tx_coalesce_usecs); 1539*527a6266SJeff Kirsher 1540*527a6266SJeff Kirsher return 0; 1541*527a6266SJeff Kirsher } 1542*527a6266SJeff Kirsher 1543*527a6266SJeff Kirsher static void 1544*527a6266SJeff Kirsher mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1545*527a6266SJeff Kirsher { 1546*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1547*527a6266SJeff Kirsher 1548*527a6266SJeff Kirsher er->rx_max_pending = 4096; 1549*527a6266SJeff Kirsher er->tx_max_pending = 4096; 1550*527a6266SJeff Kirsher er->rx_mini_max_pending = 0; 1551*527a6266SJeff Kirsher er->rx_jumbo_max_pending = 0; 1552*527a6266SJeff Kirsher 1553*527a6266SJeff Kirsher er->rx_pending = mp->rx_ring_size; 1554*527a6266SJeff Kirsher er->tx_pending = mp->tx_ring_size; 1555*527a6266SJeff Kirsher er->rx_mini_pending = 0; 1556*527a6266SJeff Kirsher er->rx_jumbo_pending = 0; 1557*527a6266SJeff Kirsher } 1558*527a6266SJeff Kirsher 1559*527a6266SJeff Kirsher static int 1560*527a6266SJeff Kirsher mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1561*527a6266SJeff Kirsher { 1562*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1563*527a6266SJeff Kirsher 1564*527a6266SJeff Kirsher if (er->rx_mini_pending || 
er->rx_jumbo_pending) 1565*527a6266SJeff Kirsher return -EINVAL; 1566*527a6266SJeff Kirsher 1567*527a6266SJeff Kirsher mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1568*527a6266SJeff Kirsher mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1569*527a6266SJeff Kirsher 1570*527a6266SJeff Kirsher if (netif_running(dev)) { 1571*527a6266SJeff Kirsher mv643xx_eth_stop(dev); 1572*527a6266SJeff Kirsher if (mv643xx_eth_open(dev)) { 1573*527a6266SJeff Kirsher netdev_err(dev, 1574*527a6266SJeff Kirsher "fatal error on re-opening device after ring param change\n"); 1575*527a6266SJeff Kirsher return -ENOMEM; 1576*527a6266SJeff Kirsher } 1577*527a6266SJeff Kirsher } 1578*527a6266SJeff Kirsher 1579*527a6266SJeff Kirsher return 0; 1580*527a6266SJeff Kirsher } 1581*527a6266SJeff Kirsher 1582*527a6266SJeff Kirsher 1583*527a6266SJeff Kirsher static int 1584*527a6266SJeff Kirsher mv643xx_eth_set_features(struct net_device *dev, u32 features) 1585*527a6266SJeff Kirsher { 1586*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1587*527a6266SJeff Kirsher u32 rx_csum = features & NETIF_F_RXCSUM; 1588*527a6266SJeff Kirsher 1589*527a6266SJeff Kirsher wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1590*527a6266SJeff Kirsher 1591*527a6266SJeff Kirsher return 0; 1592*527a6266SJeff Kirsher } 1593*527a6266SJeff Kirsher 1594*527a6266SJeff Kirsher static void mv643xx_eth_get_strings(struct net_device *dev, 1595*527a6266SJeff Kirsher uint32_t stringset, uint8_t *data) 1596*527a6266SJeff Kirsher { 1597*527a6266SJeff Kirsher int i; 1598*527a6266SJeff Kirsher 1599*527a6266SJeff Kirsher if (stringset == ETH_SS_STATS) { 1600*527a6266SJeff Kirsher for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1601*527a6266SJeff Kirsher memcpy(data + i * ETH_GSTRING_LEN, 1602*527a6266SJeff Kirsher mv643xx_eth_stats[i].stat_string, 1603*527a6266SJeff Kirsher ETH_GSTRING_LEN); 1604*527a6266SJeff Kirsher } 1605*527a6266SJeff Kirsher } 1606*527a6266SJeff Kirsher } 1607*527a6266SJeff Kirsher 1608*527a6266SJeff Kirsher static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, 1609*527a6266SJeff Kirsher struct ethtool_stats *stats, 1610*527a6266SJeff Kirsher uint64_t *data) 1611*527a6266SJeff Kirsher { 1612*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1613*527a6266SJeff Kirsher int i; 1614*527a6266SJeff Kirsher 1615*527a6266SJeff Kirsher mv643xx_eth_get_stats(dev); 1616*527a6266SJeff Kirsher mib_counters_update(mp); 1617*527a6266SJeff Kirsher mv643xx_eth_grab_lro_stats(mp); 1618*527a6266SJeff Kirsher 1619*527a6266SJeff Kirsher for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1620*527a6266SJeff Kirsher const struct mv643xx_eth_stats *stat; 1621*527a6266SJeff Kirsher void *p; 1622*527a6266SJeff Kirsher 1623*527a6266SJeff Kirsher stat = mv643xx_eth_stats + i; 1624*527a6266SJeff Kirsher 1625*527a6266SJeff Kirsher if (stat->netdev_off >= 0) 1626*527a6266SJeff Kirsher p = ((void *)mp->dev) + stat->netdev_off; 1627*527a6266SJeff Kirsher else 1628*527a6266SJeff Kirsher p = ((void *)mp) + stat->mp_off; 1629*527a6266SJeff Kirsher 1630*527a6266SJeff Kirsher data[i] = (stat->sizeof_stat == 8) ? 
1631*527a6266SJeff Kirsher *(uint64_t *)p : *(uint32_t *)p; 1632*527a6266SJeff Kirsher } 1633*527a6266SJeff Kirsher } 1634*527a6266SJeff Kirsher 1635*527a6266SJeff Kirsher static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1636*527a6266SJeff Kirsher { 1637*527a6266SJeff Kirsher if (sset == ETH_SS_STATS) 1638*527a6266SJeff Kirsher return ARRAY_SIZE(mv643xx_eth_stats); 1639*527a6266SJeff Kirsher 1640*527a6266SJeff Kirsher return -EOPNOTSUPP; 1641*527a6266SJeff Kirsher } 1642*527a6266SJeff Kirsher 1643*527a6266SJeff Kirsher static const struct ethtool_ops mv643xx_eth_ethtool_ops = { 1644*527a6266SJeff Kirsher .get_settings = mv643xx_eth_get_settings, 1645*527a6266SJeff Kirsher .set_settings = mv643xx_eth_set_settings, 1646*527a6266SJeff Kirsher .get_drvinfo = mv643xx_eth_get_drvinfo, 1647*527a6266SJeff Kirsher .nway_reset = mv643xx_eth_nway_reset, 1648*527a6266SJeff Kirsher .get_link = ethtool_op_get_link, 1649*527a6266SJeff Kirsher .get_coalesce = mv643xx_eth_get_coalesce, 1650*527a6266SJeff Kirsher .set_coalesce = mv643xx_eth_set_coalesce, 1651*527a6266SJeff Kirsher .get_ringparam = mv643xx_eth_get_ringparam, 1652*527a6266SJeff Kirsher .set_ringparam = mv643xx_eth_set_ringparam, 1653*527a6266SJeff Kirsher .get_strings = mv643xx_eth_get_strings, 1654*527a6266SJeff Kirsher .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1655*527a6266SJeff Kirsher .get_sset_count = mv643xx_eth_get_sset_count, 1656*527a6266SJeff Kirsher }; 1657*527a6266SJeff Kirsher 1658*527a6266SJeff Kirsher 1659*527a6266SJeff Kirsher /* address handling *********************************************************/ 1660*527a6266SJeff Kirsher static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1661*527a6266SJeff Kirsher { 1662*527a6266SJeff Kirsher unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); 1663*527a6266SJeff Kirsher unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); 1664*527a6266SJeff Kirsher 1665*527a6266SJeff Kirsher addr[0] = (mac_h >> 24) & 0xff; 1666*527a6266SJeff Kirsher addr[1] = (mac_h >> 16) & 0xff; 1667*527a6266SJeff Kirsher addr[2] = (mac_h >> 8) & 0xff; 1668*527a6266SJeff Kirsher addr[3] = mac_h & 0xff; 1669*527a6266SJeff Kirsher addr[4] = (mac_l >> 8) & 0xff; 1670*527a6266SJeff Kirsher addr[5] = mac_l & 0xff; 1671*527a6266SJeff Kirsher } 1672*527a6266SJeff Kirsher 1673*527a6266SJeff Kirsher static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1674*527a6266SJeff Kirsher { 1675*527a6266SJeff Kirsher wrlp(mp, MAC_ADDR_HIGH, 1676*527a6266SJeff Kirsher (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); 1677*527a6266SJeff Kirsher wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); 1678*527a6266SJeff Kirsher } 1679*527a6266SJeff Kirsher 1680*527a6266SJeff Kirsher static u32 uc_addr_filter_mask(struct net_device *dev) 1681*527a6266SJeff Kirsher { 1682*527a6266SJeff Kirsher struct netdev_hw_addr *ha; 1683*527a6266SJeff Kirsher u32 nibbles; 1684*527a6266SJeff Kirsher 1685*527a6266SJeff Kirsher if (dev->flags & IFF_PROMISC) 1686*527a6266SJeff Kirsher return 0; 1687*527a6266SJeff Kirsher 1688*527a6266SJeff Kirsher nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1689*527a6266SJeff Kirsher netdev_for_each_uc_addr(ha, dev) { 1690*527a6266SJeff Kirsher if (memcmp(dev->dev_addr, ha->addr, 5)) 1691*527a6266SJeff Kirsher return 0; 1692*527a6266SJeff Kirsher if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1693*527a6266SJeff Kirsher return 0; 1694*527a6266SJeff Kirsher 1695*527a6266SJeff Kirsher nibbles |= 1 << (ha->addr[5] & 0x0f); 1696*527a6266SJeff Kirsher } 
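/*
 * At this point every secondary unicast address shares its first
 * 44 bits with dev->dev_addr and differs only in the low nibble of
 * the last byte; 'nibbles' is a 16-bit map with one bit set per
 * such nibble.  Returning 0 (promiscuous mode, or an address list
 * that cannot be represented) makes the caller fall back to
 * unicast promiscuous mode.
 */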
1697*527a6266SJeff Kirsher 1698*527a6266SJeff Kirsher return nibbles; 1699*527a6266SJeff Kirsher } 1700*527a6266SJeff Kirsher 1701*527a6266SJeff Kirsher static void mv643xx_eth_program_unicast_filter(struct net_device *dev) 1702*527a6266SJeff Kirsher { 1703*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1704*527a6266SJeff Kirsher u32 port_config; 1705*527a6266SJeff Kirsher u32 nibbles; 1706*527a6266SJeff Kirsher int i; 1707*527a6266SJeff Kirsher 1708*527a6266SJeff Kirsher uc_addr_set(mp, dev->dev_addr); 1709*527a6266SJeff Kirsher 1710*527a6266SJeff Kirsher port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; 1711*527a6266SJeff Kirsher 1712*527a6266SJeff Kirsher nibbles = uc_addr_filter_mask(dev); 1713*527a6266SJeff Kirsher if (!nibbles) { 1714*527a6266SJeff Kirsher port_config |= UNICAST_PROMISCUOUS_MODE; 1715*527a6266SJeff Kirsher nibbles = 0xffff; 1716*527a6266SJeff Kirsher } 1717*527a6266SJeff Kirsher 1718*527a6266SJeff Kirsher for (i = 0; i < 16; i += 4) { 1719*527a6266SJeff Kirsher int off = UNICAST_TABLE(mp->port_num) + i; 1720*527a6266SJeff Kirsher u32 v; 1721*527a6266SJeff Kirsher 1722*527a6266SJeff Kirsher v = 0; 1723*527a6266SJeff Kirsher if (nibbles & 1) 1724*527a6266SJeff Kirsher v |= 0x00000001; 1725*527a6266SJeff Kirsher if (nibbles & 2) 1726*527a6266SJeff Kirsher v |= 0x00000100; 1727*527a6266SJeff Kirsher if (nibbles & 4) 1728*527a6266SJeff Kirsher v |= 0x00010000; 1729*527a6266SJeff Kirsher if (nibbles & 8) 1730*527a6266SJeff Kirsher v |= 0x01000000; 1731*527a6266SJeff Kirsher nibbles >>= 4; 1732*527a6266SJeff Kirsher 1733*527a6266SJeff Kirsher wrl(mp, off, v); 1734*527a6266SJeff Kirsher } 1735*527a6266SJeff Kirsher 1736*527a6266SJeff Kirsher wrlp(mp, PORT_CONFIG, port_config); 1737*527a6266SJeff Kirsher } 1738*527a6266SJeff Kirsher 1739*527a6266SJeff Kirsher static int addr_crc(unsigned char *addr) 1740*527a6266SJeff Kirsher { 1741*527a6266SJeff Kirsher int crc = 0; 1742*527a6266SJeff Kirsher int i; 1743*527a6266SJeff Kirsher 1744*527a6266SJeff Kirsher for (i = 0; i < 6; i++) { 1745*527a6266SJeff Kirsher int j; 1746*527a6266SJeff Kirsher 1747*527a6266SJeff Kirsher crc = (crc ^ addr[i]) << 8; 1748*527a6266SJeff Kirsher for (j = 7; j >= 0; j--) { 1749*527a6266SJeff Kirsher if (crc & (0x100 << j)) 1750*527a6266SJeff Kirsher crc ^= 0x107 << j; 1751*527a6266SJeff Kirsher } 1752*527a6266SJeff Kirsher } 1753*527a6266SJeff Kirsher 1754*527a6266SJeff Kirsher return crc; 1755*527a6266SJeff Kirsher } 1756*527a6266SJeff Kirsher 1757*527a6266SJeff Kirsher static void mv643xx_eth_program_multicast_filter(struct net_device *dev) 1758*527a6266SJeff Kirsher { 1759*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 1760*527a6266SJeff Kirsher u32 *mc_spec; 1761*527a6266SJeff Kirsher u32 *mc_other; 1762*527a6266SJeff Kirsher struct netdev_hw_addr *ha; 1763*527a6266SJeff Kirsher int i; 1764*527a6266SJeff Kirsher 1765*527a6266SJeff Kirsher if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1766*527a6266SJeff Kirsher int port_num; 1767*527a6266SJeff Kirsher u32 accept; 1768*527a6266SJeff Kirsher 1769*527a6266SJeff Kirsher oom: 1770*527a6266SJeff Kirsher port_num = mp->port_num; 1771*527a6266SJeff Kirsher accept = 0x01010101; 1772*527a6266SJeff Kirsher for (i = 0; i < 0x100; i += 4) { 1773*527a6266SJeff Kirsher wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); 1774*527a6266SJeff Kirsher wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); 1775*527a6266SJeff Kirsher } 1776*527a6266SJeff Kirsher return; 1777*527a6266SJeff Kirsher } 
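/*
 * Build the two 256-entry accept tables in one 0x200-byte scratch
 * buffer: mc_spec catches 01:00:5e:00:00:xx addresses (indexed
 * directly by the last address byte), mc_other catches everything
 * else (indexed by an 8-bit CRC of the address).  Each table byte
 * is an accept flag, written to the hardware four entries per
 * register below.
 */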
1778*527a6266SJeff Kirsher 1779*527a6266SJeff Kirsher mc_spec = kmalloc(0x200, GFP_ATOMIC); 1780*527a6266SJeff Kirsher if (mc_spec == NULL) 1781*527a6266SJeff Kirsher goto oom; 1782*527a6266SJeff Kirsher mc_other = mc_spec + (0x100 >> 2); 1783*527a6266SJeff Kirsher 1784*527a6266SJeff Kirsher memset(mc_spec, 0, 0x100); 1785*527a6266SJeff Kirsher memset(mc_other, 0, 0x100); 1786*527a6266SJeff Kirsher 1787*527a6266SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 1788*527a6266SJeff Kirsher u8 *a = ha->addr; 1789*527a6266SJeff Kirsher u32 *table; 1790*527a6266SJeff Kirsher int entry; 1791*527a6266SJeff Kirsher 1792*527a6266SJeff Kirsher if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1793*527a6266SJeff Kirsher table = mc_spec; 1794*527a6266SJeff Kirsher entry = a[5]; 1795*527a6266SJeff Kirsher } else { 1796*527a6266SJeff Kirsher table = mc_other; 1797*527a6266SJeff Kirsher entry = addr_crc(a); 1798*527a6266SJeff Kirsher } 1799*527a6266SJeff Kirsher 1800*527a6266SJeff Kirsher table[entry >> 2] |= 1 << (8 * (entry & 3)); 1801*527a6266SJeff Kirsher } 1802*527a6266SJeff Kirsher 1803*527a6266SJeff Kirsher for (i = 0; i < 0x100; i += 4) { 1804*527a6266SJeff Kirsher wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); 1805*527a6266SJeff Kirsher wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); 1806*527a6266SJeff Kirsher } 1807*527a6266SJeff Kirsher 1808*527a6266SJeff Kirsher kfree(mc_spec); 1809*527a6266SJeff Kirsher } 1810*527a6266SJeff Kirsher 1811*527a6266SJeff Kirsher static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1812*527a6266SJeff Kirsher { 1813*527a6266SJeff Kirsher mv643xx_eth_program_unicast_filter(dev); 1814*527a6266SJeff Kirsher mv643xx_eth_program_multicast_filter(dev); 1815*527a6266SJeff Kirsher } 1816*527a6266SJeff Kirsher 1817*527a6266SJeff Kirsher static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1818*527a6266SJeff Kirsher { 1819*527a6266SJeff Kirsher struct sockaddr *sa = addr; 1820*527a6266SJeff Kirsher 1821*527a6266SJeff Kirsher if (!is_valid_ether_addr(sa->sa_data)) 1822*527a6266SJeff Kirsher return -EINVAL; 1823*527a6266SJeff Kirsher 1824*527a6266SJeff Kirsher memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); 1825*527a6266SJeff Kirsher 1826*527a6266SJeff Kirsher netif_addr_lock_bh(dev); 1827*527a6266SJeff Kirsher mv643xx_eth_program_unicast_filter(dev); 1828*527a6266SJeff Kirsher netif_addr_unlock_bh(dev); 1829*527a6266SJeff Kirsher 1830*527a6266SJeff Kirsher return 0; 1831*527a6266SJeff Kirsher } 1832*527a6266SJeff Kirsher 1833*527a6266SJeff Kirsher 1834*527a6266SJeff Kirsher /* rx/tx queue initialisation ***********************************************/ 1835*527a6266SJeff Kirsher static int rxq_init(struct mv643xx_eth_private *mp, int index) 1836*527a6266SJeff Kirsher { 1837*527a6266SJeff Kirsher struct rx_queue *rxq = mp->rxq + index; 1838*527a6266SJeff Kirsher struct rx_desc *rx_desc; 1839*527a6266SJeff Kirsher int size; 1840*527a6266SJeff Kirsher int i; 1841*527a6266SJeff Kirsher 1842*527a6266SJeff Kirsher rxq->index = index; 1843*527a6266SJeff Kirsher 1844*527a6266SJeff Kirsher rxq->rx_ring_size = mp->rx_ring_size; 1845*527a6266SJeff Kirsher 1846*527a6266SJeff Kirsher rxq->rx_desc_count = 0; 1847*527a6266SJeff Kirsher rxq->rx_curr_desc = 0; 1848*527a6266SJeff Kirsher rxq->rx_used_desc = 0; 1849*527a6266SJeff Kirsher 1850*527a6266SJeff Kirsher size = rxq->rx_ring_size * sizeof(struct rx_desc); 1851*527a6266SJeff Kirsher 1852*527a6266SJeff Kirsher if (index == 0 && size <= mp->rx_desc_sram_size) { 1853*527a6266SJeff 
Kirsher rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1854*527a6266SJeff Kirsher mp->rx_desc_sram_size); 1855*527a6266SJeff Kirsher rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1856*527a6266SJeff Kirsher } else { 1857*527a6266SJeff Kirsher rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1858*527a6266SJeff Kirsher size, &rxq->rx_desc_dma, 1859*527a6266SJeff Kirsher GFP_KERNEL); 1860*527a6266SJeff Kirsher } 1861*527a6266SJeff Kirsher 1862*527a6266SJeff Kirsher if (rxq->rx_desc_area == NULL) { 1863*527a6266SJeff Kirsher netdev_err(mp->dev, 1864*527a6266SJeff Kirsher "can't allocate rx ring (%d bytes)\n", size); 1865*527a6266SJeff Kirsher goto out; 1866*527a6266SJeff Kirsher } 1867*527a6266SJeff Kirsher memset(rxq->rx_desc_area, 0, size); 1868*527a6266SJeff Kirsher 1869*527a6266SJeff Kirsher rxq->rx_desc_area_size = size; 1870*527a6266SJeff Kirsher rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1871*527a6266SJeff Kirsher GFP_KERNEL); 1872*527a6266SJeff Kirsher if (rxq->rx_skb == NULL) { 1873*527a6266SJeff Kirsher netdev_err(mp->dev, "can't allocate rx skb ring\n"); 1874*527a6266SJeff Kirsher goto out_free; 1875*527a6266SJeff Kirsher } 1876*527a6266SJeff Kirsher 1877*527a6266SJeff Kirsher rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1878*527a6266SJeff Kirsher for (i = 0; i < rxq->rx_ring_size; i++) { 1879*527a6266SJeff Kirsher int nexti; 1880*527a6266SJeff Kirsher 1881*527a6266SJeff Kirsher nexti = i + 1; 1882*527a6266SJeff Kirsher if (nexti == rxq->rx_ring_size) 1883*527a6266SJeff Kirsher nexti = 0; 1884*527a6266SJeff Kirsher 1885*527a6266SJeff Kirsher rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1886*527a6266SJeff Kirsher nexti * sizeof(struct rx_desc); 1887*527a6266SJeff Kirsher } 1888*527a6266SJeff Kirsher 1889*527a6266SJeff Kirsher rxq->lro_mgr.dev = mp->dev; 1890*527a6266SJeff Kirsher memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); 1891*527a6266SJeff Kirsher rxq->lro_mgr.features = LRO_F_NAPI; 1892*527a6266SJeff Kirsher rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1893*527a6266SJeff Kirsher rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1894*527a6266SJeff Kirsher rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); 1895*527a6266SJeff Kirsher rxq->lro_mgr.max_aggr = 32; 1896*527a6266SJeff Kirsher rxq->lro_mgr.frag_align_pad = 0; 1897*527a6266SJeff Kirsher rxq->lro_mgr.lro_arr = rxq->lro_arr; 1898*527a6266SJeff Kirsher rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; 1899*527a6266SJeff Kirsher 1900*527a6266SJeff Kirsher memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); 1901*527a6266SJeff Kirsher 1902*527a6266SJeff Kirsher return 0; 1903*527a6266SJeff Kirsher 1904*527a6266SJeff Kirsher 1905*527a6266SJeff Kirsher out_free: 1906*527a6266SJeff Kirsher if (index == 0 && size <= mp->rx_desc_sram_size) 1907*527a6266SJeff Kirsher iounmap(rxq->rx_desc_area); 1908*527a6266SJeff Kirsher else 1909*527a6266SJeff Kirsher dma_free_coherent(mp->dev->dev.parent, size, 1910*527a6266SJeff Kirsher rxq->rx_desc_area, 1911*527a6266SJeff Kirsher rxq->rx_desc_dma); 1912*527a6266SJeff Kirsher 1913*527a6266SJeff Kirsher out: 1914*527a6266SJeff Kirsher return -ENOMEM; 1915*527a6266SJeff Kirsher } 1916*527a6266SJeff Kirsher 1917*527a6266SJeff Kirsher static void rxq_deinit(struct rx_queue *rxq) 1918*527a6266SJeff Kirsher { 1919*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 1920*527a6266SJeff Kirsher int i; 1921*527a6266SJeff Kirsher 1922*527a6266SJeff Kirsher rxq_disable(rxq); 1923*527a6266SJeff Kirsher 1924*527a6266SJeff Kirsher for (i = 0; 
i < rxq->rx_ring_size; i++) { 1925*527a6266SJeff Kirsher if (rxq->rx_skb[i]) { 1926*527a6266SJeff Kirsher dev_kfree_skb(rxq->rx_skb[i]); 1927*527a6266SJeff Kirsher rxq->rx_desc_count--; 1928*527a6266SJeff Kirsher } 1929*527a6266SJeff Kirsher } 1930*527a6266SJeff Kirsher 1931*527a6266SJeff Kirsher if (rxq->rx_desc_count) { 1932*527a6266SJeff Kirsher netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", 1933*527a6266SJeff Kirsher rxq->rx_desc_count); 1934*527a6266SJeff Kirsher } 1935*527a6266SJeff Kirsher 1936*527a6266SJeff Kirsher if (rxq->index == 0 && 1937*527a6266SJeff Kirsher rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1938*527a6266SJeff Kirsher iounmap(rxq->rx_desc_area); 1939*527a6266SJeff Kirsher else 1940*527a6266SJeff Kirsher dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, 1941*527a6266SJeff Kirsher rxq->rx_desc_area, rxq->rx_desc_dma); 1942*527a6266SJeff Kirsher 1943*527a6266SJeff Kirsher kfree(rxq->rx_skb); 1944*527a6266SJeff Kirsher } 1945*527a6266SJeff Kirsher 1946*527a6266SJeff Kirsher static int txq_init(struct mv643xx_eth_private *mp, int index) 1947*527a6266SJeff Kirsher { 1948*527a6266SJeff Kirsher struct tx_queue *txq = mp->txq + index; 1949*527a6266SJeff Kirsher struct tx_desc *tx_desc; 1950*527a6266SJeff Kirsher int size; 1951*527a6266SJeff Kirsher int i; 1952*527a6266SJeff Kirsher 1953*527a6266SJeff Kirsher txq->index = index; 1954*527a6266SJeff Kirsher 1955*527a6266SJeff Kirsher txq->tx_ring_size = mp->tx_ring_size; 1956*527a6266SJeff Kirsher 1957*527a6266SJeff Kirsher txq->tx_desc_count = 0; 1958*527a6266SJeff Kirsher txq->tx_curr_desc = 0; 1959*527a6266SJeff Kirsher txq->tx_used_desc = 0; 1960*527a6266SJeff Kirsher 1961*527a6266SJeff Kirsher size = txq->tx_ring_size * sizeof(struct tx_desc); 1962*527a6266SJeff Kirsher 1963*527a6266SJeff Kirsher if (index == 0 && size <= mp->tx_desc_sram_size) { 1964*527a6266SJeff Kirsher txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1965*527a6266SJeff Kirsher mp->tx_desc_sram_size); 1966*527a6266SJeff Kirsher txq->tx_desc_dma = mp->tx_desc_sram_addr; 1967*527a6266SJeff Kirsher } else { 1968*527a6266SJeff Kirsher txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1969*527a6266SJeff Kirsher size, &txq->tx_desc_dma, 1970*527a6266SJeff Kirsher GFP_KERNEL); 1971*527a6266SJeff Kirsher } 1972*527a6266SJeff Kirsher 1973*527a6266SJeff Kirsher if (txq->tx_desc_area == NULL) { 1974*527a6266SJeff Kirsher netdev_err(mp->dev, 1975*527a6266SJeff Kirsher "can't allocate tx ring (%d bytes)\n", size); 1976*527a6266SJeff Kirsher return -ENOMEM; 1977*527a6266SJeff Kirsher } 1978*527a6266SJeff Kirsher memset(txq->tx_desc_area, 0, size); 1979*527a6266SJeff Kirsher 1980*527a6266SJeff Kirsher txq->tx_desc_area_size = size; 1981*527a6266SJeff Kirsher 1982*527a6266SJeff Kirsher tx_desc = (struct tx_desc *)txq->tx_desc_area; 1983*527a6266SJeff Kirsher for (i = 0; i < txq->tx_ring_size; i++) { 1984*527a6266SJeff Kirsher struct tx_desc *txd = tx_desc + i; 1985*527a6266SJeff Kirsher int nexti; 1986*527a6266SJeff Kirsher 1987*527a6266SJeff Kirsher nexti = i + 1; 1988*527a6266SJeff Kirsher if (nexti == txq->tx_ring_size) 1989*527a6266SJeff Kirsher nexti = 0; 1990*527a6266SJeff Kirsher 1991*527a6266SJeff Kirsher txd->cmd_sts = 0; 1992*527a6266SJeff Kirsher txd->next_desc_ptr = txq->tx_desc_dma + 1993*527a6266SJeff Kirsher nexti * sizeof(struct tx_desc); 1994*527a6266SJeff Kirsher } 1995*527a6266SJeff Kirsher 1996*527a6266SJeff Kirsher skb_queue_head_init(&txq->tx_skb); 1997*527a6266SJeff Kirsher 1998*527a6266SJeff Kirsher 
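/*
 * As with the RX ring above, the descriptors are linked into a
 * circular list through next_desc_ptr, so the DMA engine wraps
 * from the last descriptor back to the first on its own.
 */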
return 0; 1999*527a6266SJeff Kirsher } 2000*527a6266SJeff Kirsher 2001*527a6266SJeff Kirsher static void txq_deinit(struct tx_queue *txq) 2002*527a6266SJeff Kirsher { 2003*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = txq_to_mp(txq); 2004*527a6266SJeff Kirsher 2005*527a6266SJeff Kirsher txq_disable(txq); 2006*527a6266SJeff Kirsher txq_reclaim(txq, txq->tx_ring_size, 1); 2007*527a6266SJeff Kirsher 2008*527a6266SJeff Kirsher BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2009*527a6266SJeff Kirsher 2010*527a6266SJeff Kirsher if (txq->index == 0 && 2011*527a6266SJeff Kirsher txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2012*527a6266SJeff Kirsher iounmap(txq->tx_desc_area); 2013*527a6266SJeff Kirsher else 2014*527a6266SJeff Kirsher dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2015*527a6266SJeff Kirsher txq->tx_desc_area, txq->tx_desc_dma); 2016*527a6266SJeff Kirsher } 2017*527a6266SJeff Kirsher 2018*527a6266SJeff Kirsher 2019*527a6266SJeff Kirsher /* netdev ops and related ***************************************************/ 2020*527a6266SJeff Kirsher static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2021*527a6266SJeff Kirsher { 2022*527a6266SJeff Kirsher u32 int_cause; 2023*527a6266SJeff Kirsher u32 int_cause_ext; 2024*527a6266SJeff Kirsher 2025*527a6266SJeff Kirsher int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2026*527a6266SJeff Kirsher if (int_cause == 0) 2027*527a6266SJeff Kirsher return 0; 2028*527a6266SJeff Kirsher 2029*527a6266SJeff Kirsher int_cause_ext = 0; 2030*527a6266SJeff Kirsher if (int_cause & INT_EXT) { 2031*527a6266SJeff Kirsher int_cause &= ~INT_EXT; 2032*527a6266SJeff Kirsher int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2033*527a6266SJeff Kirsher } 2034*527a6266SJeff Kirsher 2035*527a6266SJeff Kirsher if (int_cause) { 2036*527a6266SJeff Kirsher wrlp(mp, INT_CAUSE, ~int_cause); 2037*527a6266SJeff Kirsher mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2038*527a6266SJeff Kirsher ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2039*527a6266SJeff Kirsher mp->work_rx |= (int_cause & INT_RX) >> 2; 2040*527a6266SJeff Kirsher } 2041*527a6266SJeff Kirsher 2042*527a6266SJeff Kirsher int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2043*527a6266SJeff Kirsher if (int_cause_ext) { 2044*527a6266SJeff Kirsher wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2045*527a6266SJeff Kirsher if (int_cause_ext & INT_EXT_LINK_PHY) 2046*527a6266SJeff Kirsher mp->work_link = 1; 2047*527a6266SJeff Kirsher mp->work_tx |= int_cause_ext & INT_EXT_TX; 2048*527a6266SJeff Kirsher } 2049*527a6266SJeff Kirsher 2050*527a6266SJeff Kirsher return 1; 2051*527a6266SJeff Kirsher } 2052*527a6266SJeff Kirsher 2053*527a6266SJeff Kirsher static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2054*527a6266SJeff Kirsher { 2055*527a6266SJeff Kirsher struct net_device *dev = (struct net_device *)dev_id; 2056*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 2057*527a6266SJeff Kirsher 2058*527a6266SJeff Kirsher if (unlikely(!mv643xx_eth_collect_events(mp))) 2059*527a6266SJeff Kirsher return IRQ_NONE; 2060*527a6266SJeff Kirsher 2061*527a6266SJeff Kirsher wrlp(mp, INT_MASK, 0); 2062*527a6266SJeff Kirsher napi_schedule(&mp->napi); 2063*527a6266SJeff Kirsher 2064*527a6266SJeff Kirsher return IRQ_HANDLED; 2065*527a6266SJeff Kirsher } 2066*527a6266SJeff Kirsher 2067*527a6266SJeff Kirsher static void handle_link_event(struct mv643xx_eth_private *mp) 2068*527a6266SJeff Kirsher { 2069*527a6266SJeff Kirsher struct net_device *dev = mp->dev; 2070*527a6266SJeff Kirsher u32 
port_status; 2071*527a6266SJeff Kirsher int speed; 2072*527a6266SJeff Kirsher int duplex; 2073*527a6266SJeff Kirsher int fc; 2074*527a6266SJeff Kirsher 2075*527a6266SJeff Kirsher port_status = rdlp(mp, PORT_STATUS); 2076*527a6266SJeff Kirsher if (!(port_status & LINK_UP)) { 2077*527a6266SJeff Kirsher if (netif_carrier_ok(dev)) { 2078*527a6266SJeff Kirsher int i; 2079*527a6266SJeff Kirsher 2080*527a6266SJeff Kirsher netdev_info(dev, "link down\n"); 2081*527a6266SJeff Kirsher 2082*527a6266SJeff Kirsher netif_carrier_off(dev); 2083*527a6266SJeff Kirsher 2084*527a6266SJeff Kirsher for (i = 0; i < mp->txq_count; i++) { 2085*527a6266SJeff Kirsher struct tx_queue *txq = mp->txq + i; 2086*527a6266SJeff Kirsher 2087*527a6266SJeff Kirsher txq_reclaim(txq, txq->tx_ring_size, 1); 2088*527a6266SJeff Kirsher txq_reset_hw_ptr(txq); 2089*527a6266SJeff Kirsher } 2090*527a6266SJeff Kirsher } 2091*527a6266SJeff Kirsher return; 2092*527a6266SJeff Kirsher } 2093*527a6266SJeff Kirsher 2094*527a6266SJeff Kirsher switch (port_status & PORT_SPEED_MASK) { 2095*527a6266SJeff Kirsher case PORT_SPEED_10: 2096*527a6266SJeff Kirsher speed = 10; 2097*527a6266SJeff Kirsher break; 2098*527a6266SJeff Kirsher case PORT_SPEED_100: 2099*527a6266SJeff Kirsher speed = 100; 2100*527a6266SJeff Kirsher break; 2101*527a6266SJeff Kirsher case PORT_SPEED_1000: 2102*527a6266SJeff Kirsher speed = 1000; 2103*527a6266SJeff Kirsher break; 2104*527a6266SJeff Kirsher default: 2105*527a6266SJeff Kirsher speed = -1; 2106*527a6266SJeff Kirsher break; 2107*527a6266SJeff Kirsher } 2108*527a6266SJeff Kirsher duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2109*527a6266SJeff Kirsher fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2110*527a6266SJeff Kirsher 2111*527a6266SJeff Kirsher netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", 2112*527a6266SJeff Kirsher speed, duplex ? "full" : "half", fc ? 
"en" : "dis"); 2113*527a6266SJeff Kirsher 2114*527a6266SJeff Kirsher if (!netif_carrier_ok(dev)) 2115*527a6266SJeff Kirsher netif_carrier_on(dev); 2116*527a6266SJeff Kirsher } 2117*527a6266SJeff Kirsher 2118*527a6266SJeff Kirsher static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2119*527a6266SJeff Kirsher { 2120*527a6266SJeff Kirsher struct mv643xx_eth_private *mp; 2121*527a6266SJeff Kirsher int work_done; 2122*527a6266SJeff Kirsher 2123*527a6266SJeff Kirsher mp = container_of(napi, struct mv643xx_eth_private, napi); 2124*527a6266SJeff Kirsher 2125*527a6266SJeff Kirsher if (unlikely(mp->oom)) { 2126*527a6266SJeff Kirsher mp->oom = 0; 2127*527a6266SJeff Kirsher del_timer(&mp->rx_oom); 2128*527a6266SJeff Kirsher } 2129*527a6266SJeff Kirsher 2130*527a6266SJeff Kirsher work_done = 0; 2131*527a6266SJeff Kirsher while (work_done < budget) { 2132*527a6266SJeff Kirsher u8 queue_mask; 2133*527a6266SJeff Kirsher int queue; 2134*527a6266SJeff Kirsher int work_tbd; 2135*527a6266SJeff Kirsher 2136*527a6266SJeff Kirsher if (mp->work_link) { 2137*527a6266SJeff Kirsher mp->work_link = 0; 2138*527a6266SJeff Kirsher handle_link_event(mp); 2139*527a6266SJeff Kirsher work_done++; 2140*527a6266SJeff Kirsher continue; 2141*527a6266SJeff Kirsher } 2142*527a6266SJeff Kirsher 2143*527a6266SJeff Kirsher queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2144*527a6266SJeff Kirsher if (likely(!mp->oom)) 2145*527a6266SJeff Kirsher queue_mask |= mp->work_rx_refill; 2146*527a6266SJeff Kirsher 2147*527a6266SJeff Kirsher if (!queue_mask) { 2148*527a6266SJeff Kirsher if (mv643xx_eth_collect_events(mp)) 2149*527a6266SJeff Kirsher continue; 2150*527a6266SJeff Kirsher break; 2151*527a6266SJeff Kirsher } 2152*527a6266SJeff Kirsher 2153*527a6266SJeff Kirsher queue = fls(queue_mask) - 1; 2154*527a6266SJeff Kirsher queue_mask = 1 << queue; 2155*527a6266SJeff Kirsher 2156*527a6266SJeff Kirsher work_tbd = budget - work_done; 2157*527a6266SJeff Kirsher if (work_tbd > 16) 2158*527a6266SJeff Kirsher work_tbd = 16; 2159*527a6266SJeff Kirsher 2160*527a6266SJeff Kirsher if (mp->work_tx_end & queue_mask) { 2161*527a6266SJeff Kirsher txq_kick(mp->txq + queue); 2162*527a6266SJeff Kirsher } else if (mp->work_tx & queue_mask) { 2163*527a6266SJeff Kirsher work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2164*527a6266SJeff Kirsher txq_maybe_wake(mp->txq + queue); 2165*527a6266SJeff Kirsher } else if (mp->work_rx & queue_mask) { 2166*527a6266SJeff Kirsher work_done += rxq_process(mp->rxq + queue, work_tbd); 2167*527a6266SJeff Kirsher } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2168*527a6266SJeff Kirsher work_done += rxq_refill(mp->rxq + queue, work_tbd); 2169*527a6266SJeff Kirsher } else { 2170*527a6266SJeff Kirsher BUG(); 2171*527a6266SJeff Kirsher } 2172*527a6266SJeff Kirsher } 2173*527a6266SJeff Kirsher 2174*527a6266SJeff Kirsher if (work_done < budget) { 2175*527a6266SJeff Kirsher if (mp->oom) 2176*527a6266SJeff Kirsher mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2177*527a6266SJeff Kirsher napi_complete(napi); 2178*527a6266SJeff Kirsher wrlp(mp, INT_MASK, mp->int_mask); 2179*527a6266SJeff Kirsher } 2180*527a6266SJeff Kirsher 2181*527a6266SJeff Kirsher return work_done; 2182*527a6266SJeff Kirsher } 2183*527a6266SJeff Kirsher 2184*527a6266SJeff Kirsher static inline void oom_timer_wrapper(unsigned long data) 2185*527a6266SJeff Kirsher { 2186*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = (void *)data; 2187*527a6266SJeff Kirsher 2188*527a6266SJeff Kirsher napi_schedule(&mp->napi); 
2189*527a6266SJeff Kirsher } 2190*527a6266SJeff Kirsher 2191*527a6266SJeff Kirsher static void phy_reset(struct mv643xx_eth_private *mp) 2192*527a6266SJeff Kirsher { 2193*527a6266SJeff Kirsher int data; 2194*527a6266SJeff Kirsher 2195*527a6266SJeff Kirsher data = phy_read(mp->phy, MII_BMCR); 2196*527a6266SJeff Kirsher if (data < 0) 2197*527a6266SJeff Kirsher return; 2198*527a6266SJeff Kirsher 2199*527a6266SJeff Kirsher data |= BMCR_RESET; 2200*527a6266SJeff Kirsher if (phy_write(mp->phy, MII_BMCR, data) < 0) 2201*527a6266SJeff Kirsher return; 2202*527a6266SJeff Kirsher 2203*527a6266SJeff Kirsher do { 2204*527a6266SJeff Kirsher data = phy_read(mp->phy, MII_BMCR); 2205*527a6266SJeff Kirsher } while (data >= 0 && data & BMCR_RESET); 2206*527a6266SJeff Kirsher } 2207*527a6266SJeff Kirsher 2208*527a6266SJeff Kirsher static void port_start(struct mv643xx_eth_private *mp) 2209*527a6266SJeff Kirsher { 2210*527a6266SJeff Kirsher u32 pscr; 2211*527a6266SJeff Kirsher int i; 2212*527a6266SJeff Kirsher 2213*527a6266SJeff Kirsher /* 2214*527a6266SJeff Kirsher * Perform PHY reset, if there is a PHY. 2215*527a6266SJeff Kirsher */ 2216*527a6266SJeff Kirsher if (mp->phy != NULL) { 2217*527a6266SJeff Kirsher struct ethtool_cmd cmd; 2218*527a6266SJeff Kirsher 2219*527a6266SJeff Kirsher mv643xx_eth_get_settings(mp->dev, &cmd); 2220*527a6266SJeff Kirsher phy_reset(mp); 2221*527a6266SJeff Kirsher mv643xx_eth_set_settings(mp->dev, &cmd); 2222*527a6266SJeff Kirsher } 2223*527a6266SJeff Kirsher 2224*527a6266SJeff Kirsher /* 2225*527a6266SJeff Kirsher * Configure basic link parameters. 2226*527a6266SJeff Kirsher */ 2227*527a6266SJeff Kirsher pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2228*527a6266SJeff Kirsher 2229*527a6266SJeff Kirsher pscr |= SERIAL_PORT_ENABLE; 2230*527a6266SJeff Kirsher wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2231*527a6266SJeff Kirsher 2232*527a6266SJeff Kirsher pscr |= DO_NOT_FORCE_LINK_FAIL; 2233*527a6266SJeff Kirsher if (mp->phy == NULL) 2234*527a6266SJeff Kirsher pscr |= FORCE_LINK_PASS; 2235*527a6266SJeff Kirsher wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2236*527a6266SJeff Kirsher 2237*527a6266SJeff Kirsher /* 2238*527a6266SJeff Kirsher * Configure TX path and queues. 2239*527a6266SJeff Kirsher */ 2240*527a6266SJeff Kirsher tx_set_rate(mp, 1000000000, 16777216); 2241*527a6266SJeff Kirsher for (i = 0; i < mp->txq_count; i++) { 2242*527a6266SJeff Kirsher struct tx_queue *txq = mp->txq + i; 2243*527a6266SJeff Kirsher 2244*527a6266SJeff Kirsher txq_reset_hw_ptr(txq); 2245*527a6266SJeff Kirsher txq_set_rate(txq, 1000000000, 16777216); 2246*527a6266SJeff Kirsher txq_set_fixed_prio_mode(txq); 2247*527a6266SJeff Kirsher } 2248*527a6266SJeff Kirsher 2249*527a6266SJeff Kirsher /* 2250*527a6266SJeff Kirsher * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2251*527a6266SJeff Kirsher * frames to RX queue #0, and include the pseudo-header when 2252*527a6266SJeff Kirsher * calculating receive checksums. 2253*527a6266SJeff Kirsher */ 2254*527a6266SJeff Kirsher mv643xx_eth_set_features(mp->dev, mp->dev->features); 2255*527a6266SJeff Kirsher 2256*527a6266SJeff Kirsher /* 2257*527a6266SJeff Kirsher * Treat BPDUs as normal multicasts, and disable partition mode. 2258*527a6266SJeff Kirsher */ 2259*527a6266SJeff Kirsher wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2260*527a6266SJeff Kirsher 2261*527a6266SJeff Kirsher /* 2262*527a6266SJeff Kirsher * Add configured unicast addresses to address filter table. 
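 * (The call below rebuilds the 16-entry nibble filter for
 * dev->dev_addr and any secondary unicast addresses, falling back
 * to unicast promiscuous mode when the list cannot be represented.)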
2263*527a6266SJeff Kirsher */ 2264*527a6266SJeff Kirsher mv643xx_eth_program_unicast_filter(mp->dev); 2265*527a6266SJeff Kirsher 2266*527a6266SJeff Kirsher /* 2267*527a6266SJeff Kirsher * Enable the receive queues. 2268*527a6266SJeff Kirsher */ 2269*527a6266SJeff Kirsher for (i = 0; i < mp->rxq_count; i++) { 2270*527a6266SJeff Kirsher struct rx_queue *rxq = mp->rxq + i; 2271*527a6266SJeff Kirsher u32 addr; 2272*527a6266SJeff Kirsher 2273*527a6266SJeff Kirsher addr = (u32)rxq->rx_desc_dma; 2274*527a6266SJeff Kirsher addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2275*527a6266SJeff Kirsher wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2276*527a6266SJeff Kirsher 2277*527a6266SJeff Kirsher rxq_enable(rxq); 2278*527a6266SJeff Kirsher } 2279*527a6266SJeff Kirsher } 2280*527a6266SJeff Kirsher 2281*527a6266SJeff Kirsher static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2282*527a6266SJeff Kirsher { 2283*527a6266SJeff Kirsher int skb_size; 2284*527a6266SJeff Kirsher 2285*527a6266SJeff Kirsher /* 2286*527a6266SJeff Kirsher * Reserve 2+14 bytes for an ethernet header (the hardware 2287*527a6266SJeff Kirsher * automatically prepends 2 bytes of dummy data to each 2288*527a6266SJeff Kirsher * received packet), 16 bytes for up to four VLAN tags, and 2289*527a6266SJeff Kirsher * 4 bytes for the trailing FCS -- 36 bytes total. 2290*527a6266SJeff Kirsher */ 2291*527a6266SJeff Kirsher skb_size = mp->dev->mtu + 36; 2292*527a6266SJeff Kirsher 2293*527a6266SJeff Kirsher /* 2294*527a6266SJeff Kirsher * Make sure that the skb size is a multiple of 8 bytes, as 2295*527a6266SJeff Kirsher * the lower three bits of the receive descriptor's buffer 2296*527a6266SJeff Kirsher * size field are ignored by the hardware. 2297*527a6266SJeff Kirsher */ 2298*527a6266SJeff Kirsher mp->skb_size = (skb_size + 7) & ~7; 2299*527a6266SJeff Kirsher 2300*527a6266SJeff Kirsher /* 2301*527a6266SJeff Kirsher * If NET_SKB_PAD is smaller than a cache line, 2302*527a6266SJeff Kirsher * netdev_alloc_skb() will cause skb->data to be misaligned 2303*527a6266SJeff Kirsher * to a cache line boundary. If this is the case, include 2304*527a6266SJeff Kirsher * some extra space to allow re-aligning the data area. 
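 *
 * For example (illustrative only): with the default 1500-byte MTU
 * the computation above gives 1500 + 36 = 1536 bytes, already a
 * multiple of 8, and SKB_DMA_REALIGN is then added on top as the
 * re-alignment slack described here.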
2305*527a6266SJeff Kirsher */ 2306*527a6266SJeff Kirsher mp->skb_size += SKB_DMA_REALIGN; 2307*527a6266SJeff Kirsher } 2308*527a6266SJeff Kirsher 2309*527a6266SJeff Kirsher static int mv643xx_eth_open(struct net_device *dev) 2310*527a6266SJeff Kirsher { 2311*527a6266SJeff Kirsher struct mv643xx_eth_private *mp = netdev_priv(dev); 2312*527a6266SJeff Kirsher int err; 2313*527a6266SJeff Kirsher int i; 2314*527a6266SJeff Kirsher 2315*527a6266SJeff Kirsher wrlp(mp, INT_CAUSE, 0); 2316*527a6266SJeff Kirsher wrlp(mp, INT_CAUSE_EXT, 0); 2317*527a6266SJeff Kirsher rdlp(mp, INT_CAUSE_EXT); 2318*527a6266SJeff Kirsher 2319*527a6266SJeff Kirsher err = request_irq(dev->irq, mv643xx_eth_irq, 2320*527a6266SJeff Kirsher IRQF_SHARED, dev->name, dev); 2321*527a6266SJeff Kirsher if (err) { 2322*527a6266SJeff Kirsher netdev_err(dev, "can't assign irq\n"); 2323*527a6266SJeff Kirsher return -EAGAIN; 2324*527a6266SJeff Kirsher } 2325*527a6266SJeff Kirsher 2326*527a6266SJeff Kirsher mv643xx_eth_recalc_skb_size(mp); 2327*527a6266SJeff Kirsher 2328*527a6266SJeff Kirsher napi_enable(&mp->napi); 2329*527a6266SJeff Kirsher 2330*527a6266SJeff Kirsher skb_queue_head_init(&mp->rx_recycle); 2331*527a6266SJeff Kirsher 2332*527a6266SJeff Kirsher mp->int_mask = INT_EXT; 2333*527a6266SJeff Kirsher 2334*527a6266SJeff Kirsher for (i = 0; i < mp->rxq_count; i++) { 2335*527a6266SJeff Kirsher err = rxq_init(mp, i); 2336*527a6266SJeff Kirsher if (err) { 2337*527a6266SJeff Kirsher while (--i >= 0) 2338*527a6266SJeff Kirsher rxq_deinit(mp->rxq + i); 2339*527a6266SJeff Kirsher goto out; 2340*527a6266SJeff Kirsher } 2341*527a6266SJeff Kirsher 2342*527a6266SJeff Kirsher rxq_refill(mp->rxq + i, INT_MAX); 2343*527a6266SJeff Kirsher mp->int_mask |= INT_RX_0 << i; 2344*527a6266SJeff Kirsher } 2345*527a6266SJeff Kirsher 2346*527a6266SJeff Kirsher if (mp->oom) { 2347*527a6266SJeff Kirsher mp->rx_oom.expires = jiffies + (HZ / 10); 2348*527a6266SJeff Kirsher add_timer(&mp->rx_oom); 2349*527a6266SJeff Kirsher } 2350*527a6266SJeff Kirsher 2351*527a6266SJeff Kirsher for (i = 0; i < mp->txq_count; i++) { 2352*527a6266SJeff Kirsher err = txq_init(mp, i); 2353*527a6266SJeff Kirsher if (err) { 2354*527a6266SJeff Kirsher while (--i >= 0) 2355*527a6266SJeff Kirsher txq_deinit(mp->txq + i); 2356*527a6266SJeff Kirsher goto out_free; 2357*527a6266SJeff Kirsher } 2358*527a6266SJeff Kirsher mp->int_mask |= INT_TX_END_0 << i; 2359*527a6266SJeff Kirsher } 2360*527a6266SJeff Kirsher 2361*527a6266SJeff Kirsher port_start(mp); 2362*527a6266SJeff Kirsher 2363*527a6266SJeff Kirsher wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2364*527a6266SJeff Kirsher wrlp(mp, INT_MASK, mp->int_mask); 2365*527a6266SJeff Kirsher 2366*527a6266SJeff Kirsher return 0; 2367*527a6266SJeff Kirsher 2368*527a6266SJeff Kirsher 2369*527a6266SJeff Kirsher out_free: 2370*527a6266SJeff Kirsher for (i = 0; i < mp->rxq_count; i++) 2371*527a6266SJeff Kirsher rxq_deinit(mp->rxq + i); 2372*527a6266SJeff Kirsher out: 2373*527a6266SJeff Kirsher free_irq(dev->irq, dev); 2374*527a6266SJeff Kirsher 2375*527a6266SJeff Kirsher return err; 2376*527a6266SJeff Kirsher } 2377*527a6266SJeff Kirsher 2378*527a6266SJeff Kirsher static void port_reset(struct mv643xx_eth_private *mp) 2379*527a6266SJeff Kirsher { 2380*527a6266SJeff Kirsher unsigned int data; 2381*527a6266SJeff Kirsher int i; 2382*527a6266SJeff Kirsher 2383*527a6266SJeff Kirsher for (i = 0; i < mp->rxq_count; i++) 2384*527a6266SJeff Kirsher rxq_disable(mp->rxq + i); 2385*527a6266SJeff Kirsher for (i = 0; i < mp->txq_count; i++) 
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}
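/*
 * The PORT_STATUS poll above waits until, in a single read, no
 * transmit is in progress *and* the TX FIFO reports empty; only
 * then is SERIAL_PORT_ENABLE pulled, so a frame that is still
 * draining is not cut short.
 */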
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, ifr, cmd);

	return -EOPNOTSUPP;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  There is a risk that the re-open will
	 * not succeed if memory is exhausted at that point.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif


/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
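/*
 * Example window programming: a 256 MiB DRAM chip select based at
 * 0x00000000 yields WINDOW_SIZE = (0x10000000 - 1) & 0xffff0000 =
 * 0x0fff0000, with the mbus attribute and target id packed into
 * the low bytes of WINDOW_BASE.  A set bit in WINDOW_BAR_ENABLE
 * disables the corresponding window, so clearing bit i of
 * win_enable enables window i; win_protect accumulates two
 * full-access bits per window for later per-port programming.
 */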
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bit 25 plus bits [21:7]
	 * of the SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}
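/*
 * Both probes above rely on the same write-then-read-back trick:
 * a config bit that is not implemented on a given chip revision
 * reads back as zero, so writing it and re-reading reveals which
 * register layout is present.  Port 0's register block (offset
 * 0x0400) is used for the probe.
 */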
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;

	msp->base = ioremap(res->start, resource_size(res));
	if (msp->base == NULL)
		goto out_free;

	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus = mdiobus_alloc();
		if (msp->smi_bus == NULL)
			goto out_unmap;

		msp->smi_bus->priv = msp;
		msp->smi_bus->name = "mv643xx_eth smi";
		msp->smi_bus->read = smi_bus_read;
		msp->smi_bus->write = smi_bus_write;
		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
		msp->smi_bus->parent = &pdev->dev;
		msp->smi_bus->phy_mask = 0xffffffff;
		if (mdiobus_register(msp->smi_bus) < 0)
			goto out_free_mii_bus;
		msp->smi = msp;
	} else {
		msp->smi = platform_get_drvdata(pd->shared_smi);
	}

	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);
	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free_mii_bus:
	mdiobus_free(msp->smi_bus);
out_unmap:
	iounmap(msp->base);
out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL || pd->shared_smi == NULL) {
		mdiobus_unregister(msp->smi_bus);
		mdiobus_free(msp->smi_bus);
	}
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}
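/*
 * PHY_ADDR packs one 5-bit PHY address per port side by side:
 * port 0 in bits [4:0], port 1 in bits [9:5], port 2 in bits
 * [14:10].  phy_addr_set(mp, 8) on port 1, for example, clears
 * bits [9:5] and writes 8 << 5 into them.
 */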
static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}
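/*
 * In phy_scan() above, when no explicit address is given, the scan
 * starts at whatever address is currently latched in the PHY_ADDR
 * register and walks all 32 SMI addresses, with (start + i) & 0x1f
 * wrapping from 31 back to 0; the first PHY found wins and is
 * latched back with phy_addr_set().  Note also the GNU "x ? : y"
 * shorthand in set_params(), which evaluates to x when x is
 * non-zero and to y otherwise.
 */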
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};
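/*
 * Example of the PHY-less path in init_pscr(): for a fixed-link
 * port configured as SPEED_1000/DUPLEX_FULL, pscr is rebuilt as
 * MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED |
 * DISABLE_AUTO_NEG_SPEED_GMII | SET_GMII_SPEED_TO_1000 |
 * DISABLE_AUTO_NEG_FOR_FLOW_CTRL | DISABLE_AUTO_NEG_FOR_DUPLEX |
 * SET_FULL_DUPLEX_MODE.  When a PHY is present, none of the force
 * bits are set here and speed/duplex follow the PHY instead.
 */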
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	if (mp->phy != NULL)
		phy_init(mp, pd->speed, pd->duplex);

	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

	init_pscr(mp, pd->speed, pd->duplex);

	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_RXCSUM | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}
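/*
 * Ordering note: register_netdev() is deliberately the last setup
 * step above.  Once it returns, the stack may call ndo_open at any
 * time, so the timers, the tx_timeout work item, NAPI and the
 * SDMA/coalescing register defaults all have to be in place before
 * that point.
 */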
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);
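/*
 * The shared (SMI/window) driver is registered before the per-port
 * driver because each port's probe looks up its shared device via
 * platform_get_drvdata(pd->shared).  Module init therefore unwinds
 * the shared registration if the port driver fails to register,
 * and module exit tears the two down in the reverse order.
 */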
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);