/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/cpu.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
#define MVNETA_PORT_RX_RESET                    0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
#define MVNETA_PHY_ADDR                         0x2000
#define      MVNETA_PHY_ADDR_MASK               0x1f
#define MVNETA_MBUS_RETRY                       0x2010
#define MVNETA_UNIT_INTR_CAUSE                  0x2080
#define MVNETA_UNIT_CONTROL                     0x20B0
#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE                 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
#define MVNETA_PORT_CONFIG                      0x2400
#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
                                                 MVNETA_DEF_RXQ_ARP(q)   | \
                                                 MVNETA_DEF_RXQ_TCP(q)   | \
                                                 MVNETA_DEF_RXQ_UDP(q)   | \
                                                 MVNETA_DEF_RXQ_BPDU(q)  | \
                                                 MVNETA_TX_UNSET_ERR_SUM | \
                                                 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND               0x2404
#define MVNETA_MAC_ADDR_LOW                     0x2414
#define MVNETA_MAC_ADDR_HIGH                    0x2418
#define MVNETA_SDMA_CONFIG                      0x241c
#define      MVNETA_SDMA_BRST_SIZE_16           4
#define      MVNETA_RX_BRST_SZ_MASK(burst)      ((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP             BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP             BIT(5)
#define      MVNETA_DESC_SWAP                   BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)      ((burst) << 22)
#define MVNETA_PORT_STATUS                      0x2444
#define      MVNETA_TX_IN_PRGRS                 BIT(1)
#define      MVNETA_TX_FIFO_EMPTY               BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE                0x247c
#define MVNETA_SERDES_CFG                       0x24A0
#define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO         0x0667
#define MVNETA_TYPE_PRIO                        0x24bc
#define      MVNETA_FORCE_UNI                   BIT(21)
#define MVNETA_TXQ_CMD_1                        0x24e4
#define MVNETA_TXQ_CMD                          0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT           8
#define      MVNETA_TXQ_ENABLE_MASK             0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT           0x2484
#define MVNETA_OVERRUN_FRAME_COUNT              0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER               0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE       BIT(31)
#define MVNETA_ACC_MODE                         0x2500
#define MVNETA_CPU_MAP(cpu)                     (0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK     0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK     0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)         BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)         BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)             (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, a read of the register from this CPU will always return 0
 * and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE                   0x25a0
#define MVNETA_INTR_NEW_MASK                    0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29     = OLD_REG_SUM, see old reg ?
 * bit  30     = TX_ERR_SUM, one bit for 4 ports
 * bit  31     = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)       (((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL            (0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)       (((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL            (0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK          BIT(31)
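/* For example, with the default eight queues per direction,
 * MVNETA_TX_INTR_MASK(8) == 0x00ff and MVNETA_RX_INTR_MASK(8) == 0xff00,
 * i.e. the "all queues" masks above.
 */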

#define MVNETA_INTR_OLD_CAUSE                   0x25a8
#define MVNETA_INTR_OLD_MASK                    0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE                  0x25b0
#define MVNETA_INTR_MISC_MASK                   0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE     BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE           BIT(1)
#define      MVNETA_CAUSE_PTP                   BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR     BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN            BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR          BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT          BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN            BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR              BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE       BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR       BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT   16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT       24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK    (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)     (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE                      0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK    0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK    0x000000ff

#define MVNETA_RXQ_CMD                          0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT           8
#define      MVNETA_RXQ_ENABLE_MASK             0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)            (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)              (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0                      0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT      2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK       0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE           BIT(0)
#define MVNETA_GMAC_CTRL_2                      0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE      BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE            BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII            BIT(4)
#define      MVNETA_GMAC2_PORT_RESET            BIT(6)
#define MVNETA_GMAC_STATUS                      0x2c10
#define      MVNETA_GMAC_LINK_UP                BIT(0)
#define      MVNETA_GMAC_SPEED_1000             BIT(1)
#define      MVNETA_GMAC_SPEED_100              BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX            BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE    BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE    BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE    BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE    BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG              0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN        BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS        BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE       BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED       BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED      BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN            BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN        BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX     BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN           BIT(13)
#define MVNETA_MIB_COUNTERS_BASE                0x3000
#define      MVNETA_MIB_LATE_COLLISION          0x7c
#define MVNETA_DA_FILT_SPEC_MCAST               0x3400
#define MVNETA_DA_FILT_OTH_MCAST                0x3500
#define MVNETA_DA_FILT_UCAST_BASE               0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)             (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)                  (0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK    0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)  ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)                (0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT          16
#define MVNETA_TXQ_STATUS_REG(q)                (0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT         16
#define      MVNETA_TXQ_SENT_DESC_MASK          0x3fff0000
#define MVNETA_PORT_TX_RESET                    0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET           BIT(0)
#define MVNETA_TX_MTU                           0x3e0c
#define MVNETA_TX_TOKEN_SIZE                    0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX           0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)            (0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX          0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK     0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)        \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
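/* For example, in a ring of 128 descriptors (last_desc == 127), index
 * 126 advances to 127 and index 127 wraps back to 0.
 */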

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS         1
#define MVNETA_RX_COAL_PKTS             32
#define MVNETA_RX_COAL_USEC             100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is automatically filled with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they get the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE                  2

#define MVNETA_VLAN_TAG_LEN             4

#define MVNETA_CPU_D_CACHE_LINE_SIZE    32
#define MVNETA_TX_CSUM_DEF_SIZE         1600
#define MVNETA_TX_CSUM_MAX_SIZE         9800
#define MVNETA_ACC_MODE_EXT             1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC  1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC  1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT    10000

#define MVNETA_TX_MTU_MAX               0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE        1

/* TSO header size */
#define TSO_HEADER_SIZE                 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD                  128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD                  532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS             100

#define MVNETA_MAX_SKB_DESCS            (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE        32

#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN,                        \
              MVNETA_CPU_D_CACHE_LINE_SIZE)

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
        const char name[ETH_GSTRING_LEN];
};

#define T_REG_32        32
#define T_REG_64        64

static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
        { 0x3010, T_REG_32, "good_frames_received", },
        { 0x3008, T_REG_32, "bad_octets_received", },
        { 0x3014, T_REG_32, "bad_frames_received", },
        { 0x3018, T_REG_32, "broadcast_frames_received", },
        { 0x301c, T_REG_32, "multicast_frames_received", },
        { 0x3050, T_REG_32, "unrec_mac_control_received", },
        { 0x3058, T_REG_32, "good_fc_received", },
        { 0x305c, T_REG_32, "bad_fc_received", },
        { 0x3060, T_REG_32, "undersize_received", },
        { 0x3064, T_REG_32, "fragments_received", },
        { 0x3068, T_REG_32, "oversize_received", },
        { 0x306c, T_REG_32, "jabber_received", },
        { 0x3070, T_REG_32, "mac_receive_error", },
        { 0x3074, T_REG_32, "bad_crc_event", },
        { 0x3078, T_REG_32, "collision", },
        { 0x307c, T_REG_32, "late_collision", },
        { 0x2484, T_REG_32, "rx_discard", },
        { 0x2488, T_REG_32, "rx_overrun", },
        { 0x3020, T_REG_32, "frames_64_octets", },
        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
        { 0x3038, T_REG_64, "good_octets_sent", },
        { 0x3040, T_REG_32, "good_frames_sent", },
        { 0x3044, T_REG_32, "excessive_collision", },
        { 0x3048, T_REG_32, "multicast_frames_sent", },
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64     rx_packets;
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
};

struct mvneta_pcpu_port {
        /* Pointer to the shared port */
        struct mvneta_port      *pp;

        /* Pointer to the CPU-local NAPI struct */
        struct napi_struct      napi;

        /* Cause of the previous interrupt */
        u32                     cause_rx_tx;
};

struct mvneta_port {
        struct mvneta_pcpu_port __percpu        *ports;
        struct mvneta_pcpu_stats __percpu       *stats;

        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;
        int rxq_def;

        /* Core clock */
        struct clk *clk;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
        unsigned int tx_csum_limit;
        unsigned int use_inband_status:1;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT  0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP        BIT(16)
#define MVNETA_TX_L3_IP6        BIT(17)
#define MVNETA_TXD_IP_CSUM      BIT(18)
#define MVNETA_TXD_Z_PAD        BIT(19)
#define MVNETA_TXD_L_DESC       BIT(20)
#define MVNETA_TXD_F_DESC       BIT(21)
#define MVNETA_TXD_FLZ_DESC     (MVNETA_TXD_Z_PAD  | \
                                 MVNETA_TXD_L_DESC | \
                                 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL  BIT(30)
#define MVNETA_TX_L4_CSUM_NOT   BIT(31)

#define MVNETA_RXD_ERR_CRC              0x0
#define MVNETA_RXD_ERR_SUMMARY          BIT(16)
#define MVNETA_RXD_ERR_OVERRUN          BIT(17)
#define MVNETA_RXD_ERR_LEN              BIT(18)
#define MVNETA_RXD_ERR_RESOURCE         (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK        (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4               BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC      (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK           BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32  command;           /* Options used by HW for packet transmitting.*/
        u16  reserverd1;        /* csum_l4 (for future use) */
        u16  data_size;         /* Data size of transmitted packet in bytes */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer */
        u32  reserved2;         /* hw_cmd - (for future use, PMT) */
        u32  reserved3[4];      /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32  status;            /* Info about received packet */
        u16  reserved1;         /* pnc_info - (for future use, PnC) */
        u16  data_size;         /* Size of received packet in bytes */

        u32  buf_phys_addr;     /* Physical address of the buffer */
        u32  reserved2;         /* pnc_flow_id (for future use, PnC) */

        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
        u16  reserved3;         /* prefetch_cmd, for future use */
        u16  reserved4;         /* csum_l4 - (for future use, PnC) */

        u32  reserved5;         /* pnc_extra PnC (for future use, PnC) */
        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16  data_size;         /* Data size of transmitted packet in bytes */
        u16  reserverd1;        /* csum_l4 (for future use) */
        u32  command;           /* Options used by HW for packet transmitting.*/
        u32  reserved2;         /* hw_cmd - (for future use, PMT) */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer */
        u32  reserved3[4];      /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16  data_size;         /* Size of received packet in bytes */
        u16  reserved1;         /* pnc_info - (for future use, PnC) */
        u32  status;            /* Info about received packet */

        u32  reserved2;         /* pnc_flow_id (for future use, PnC) */
        u32  buf_phys_addr;     /* Physical address of the buffer */

        u16  reserved4;         /* csum_l4 - (for future use, PnC) */
        u16  reserved3;         /* prefetch_cmd, for future use */
        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */

        u32  reserved5;         /* pnc_extra PnC (for future use, PnC) */
        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
};
#endif
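/* Note on the two layouts above: on big-endian CPUs the SDMA is
 * configured with MVNETA_DESC_SWAP (see mvneta_defaults_set()), which
 * byte-swaps each 32-bit descriptor word, so the sub-word fields are
 * declared in the opposite order within their word so that the swapped
 * result matches the little-endian layout.
 */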

struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptor in the
         * descriptor ring
         */
        int count;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;

        /* Affinity mask for CPUs*/
        cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues; all of them are
 * allocated, and mvneta_defaults_set() distributes them across the
 * present CPUs.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
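/* Note: the MIB counters are clear-on-read, so the single pass of
 * dummy reads above is sufficient to zero them.
 */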

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes   = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes   = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes   += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes   += tx_bytes;
        }

        stats->rx_errors        = dev->stats.rx_errors;
        stats->rx_dropped       = dev->stats.rx_dropped;

        stats->tx_dropped       = dev->stats.tx_dropped;

        return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
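/* Worked example: making 300 descriptors available takes two writes,
 * one adding 255 and one adding the remaining 45, since the "add
 * non-occupied" field of the status update register is only eight
 * bits wide.
 */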

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
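/* The hardware holds the maximum RX size in units of two bytes, which
 * is why the value below is divided by two after subtracting the
 * two-byte Marvell header.
 */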
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is in units of 8 bytes */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once; assume the
         * caller processes TX descriptors in quanta less than 256
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
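/* As with the packet offset above, the hardware buffer size field is
 * in units of 8 bytes, hence the ">> 3".
 */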

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        q_map = 0;
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                if (rxq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
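/* Each 32-bit filter-table register packs four one-byte entries, and
 * each entry is a "pass" bit 0 plus the target queue in bits 3:1. For
 * example, queue == 0 gives val == 0x01010101, i.e. "pass to queue 0"
 * replicated into all four entries.
 */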

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
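/* Enable or disable in-band (SGMII) auto-negotiation. Besides the AN
 * enable bits proper, this toggles MVNETA_GMAC_1MS_CLOCK_ENABLE in the
 * clock divider register and the matching in-band AN enable bit in
 * GMAC control register 2.
 */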
1045c5aff182SThomas Petazzoni * This method can be called after mvneta_port_down() to return the port
1046c5aff182SThomas Petazzoni * settings to defaults.
1047c5aff182SThomas Petazzoni */
1048c5aff182SThomas Petazzoni static void mvneta_defaults_set(struct mvneta_port *pp)
1049c5aff182SThomas Petazzoni {
1050c5aff182SThomas Petazzoni int cpu;
1051c5aff182SThomas Petazzoni int queue;
1052c5aff182SThomas Petazzoni u32 val;
10532dcf75e2SGregory CLEMENT int max_cpu = num_present_cpus();
1054c5aff182SThomas Petazzoni 
1055c5aff182SThomas Petazzoni /* Clear all Cause registers */
1056c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1057c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1058c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1059c5aff182SThomas Petazzoni 
1060c5aff182SThomas Petazzoni /* Mask all interrupts */
1061c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1062c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1063c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1064c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1065c5aff182SThomas Petazzoni 
1066c5aff182SThomas Petazzoni /* Enable MBUS Retry bit16 */
1067c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1068c5aff182SThomas Petazzoni 
106950bf8cb6SGregory CLEMENT /* Set CPU queue access map. CPUs are assigned to the RX and
107050bf8cb6SGregory CLEMENT * TX queues modulo their number. If there is only one TX
107150bf8cb6SGregory CLEMENT * queue then it is assigned to the CPU associated to the
107250bf8cb6SGregory CLEMENT * default RX queue.
10736a20c175SThomas Petazzoni */
10742dcf75e2SGregory CLEMENT for_each_present_cpu(cpu) {
10752dcf75e2SGregory CLEMENT int rxq_map = 0, txq_map = 0;
107650bf8cb6SGregory CLEMENT int rxq, txq;
10772dcf75e2SGregory CLEMENT 
10782dcf75e2SGregory CLEMENT for (rxq = 0; rxq < rxq_number; rxq++)
10792dcf75e2SGregory CLEMENT if ((rxq % max_cpu) == cpu)
10802dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
10812dcf75e2SGregory CLEMENT 
108250bf8cb6SGregory CLEMENT for (txq = 0; txq < txq_number; txq++)
108350bf8cb6SGregory CLEMENT if ((txq % max_cpu) == cpu)
108450bf8cb6SGregory CLEMENT txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
108550bf8cb6SGregory CLEMENT 
108650bf8cb6SGregory CLEMENT /* With only one TX queue we configure a special case
108750bf8cb6SGregory CLEMENT * so that all the IRQs are handled on a single
108850bf8cb6SGregory CLEMENT * CPU
108950bf8cb6SGregory CLEMENT */
109050bf8cb6SGregory CLEMENT if (txq_number == 1)
109150bf8cb6SGregory CLEMENT txq_map = (cpu == pp->rxq_def) ?
109250bf8cb6SGregory CLEMENT MVNETA_CPU_TXQ_ACCESS(1) : 0;
10932dcf75e2SGregory CLEMENT 
10942dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
10952dcf75e2SGregory CLEMENT }
1096c5aff182SThomas Petazzoni 
1097c5aff182SThomas Petazzoni /* Reset RX and TX DMAs */
1098c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1099c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1100c5aff182SThomas Petazzoni 
1101c5aff182SThomas Petazzoni /* Disable Legacy WRR, Disable EJP, Release from reset */
1102c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1103c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) {
1104c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1105c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1106c5aff182SThomas Petazzoni }
1107c5aff182SThomas Petazzoni 
1108c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1109c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1110c5aff182SThomas Petazzoni 
1111c5aff182SThomas Petazzoni /* Set Port Acceleration Mode */
1112c5aff182SThomas Petazzoni val = MVNETA_ACC_MODE_EXT;
1113c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_ACC_MODE, val);
1114c5aff182SThomas Petazzoni 
1115c5aff182SThomas Petazzoni /* Update val of portCfg register according to all RxQueue types */
111690b74c01SGregory CLEMENT val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1117c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1118c5aff182SThomas Petazzoni 
1119c5aff182SThomas Petazzoni val = 0;
1120c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1121c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1122c5aff182SThomas Petazzoni 
1123c5aff182SThomas Petazzoni /* Build PORT_SDMA_CONFIG_REG */
1124c5aff182SThomas Petazzoni val = 0;
1125c5aff182SThomas Petazzoni 
1126c5aff182SThomas Petazzoni /* Default burst size */
1127c5aff182SThomas Petazzoni val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1128c5aff182SThomas Petazzoni val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
11299ad8fef6SThomas Petazzoni val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1130c5aff182SThomas Petazzoni 
11319ad8fef6SThomas Petazzoni #if defined(__BIG_ENDIAN)
11329ad8fef6SThomas Petazzoni val |= MVNETA_DESC_SWAP;
11339ad8fef6SThomas Petazzoni #endif
1134c5aff182SThomas Petazzoni 
1135c5aff182SThomas Petazzoni /* Assign port SDMA configuration */
1136c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1137c5aff182SThomas Petazzoni 
113871408602SThomas Petazzoni /* Disable PHY polling in hardware, since we're using the
113971408602SThomas Petazzoni * kernel phylib to do this.
114071408602SThomas Petazzoni */
114171408602SThomas Petazzoni val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
114271408602SThomas Petazzoni val &= ~MVNETA_PHY_POLLING_ENABLE;
114371408602SThomas Petazzoni mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
114471408602SThomas Petazzoni 
11450c0744fcSStas Sergeev mvneta_set_autoneg(pp, pp->use_inband_status);
1146c5aff182SThomas Petazzoni mvneta_set_ucast_table(pp, -1);
1147c5aff182SThomas Petazzoni mvneta_set_special_mcast_table(pp, -1);
1148c5aff182SThomas Petazzoni mvneta_set_other_mcast_table(pp, -1);
1149c5aff182SThomas Petazzoni 
1150c5aff182SThomas Petazzoni /* Set port interrupt enable register - default enable all */
1151c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_ENABLE,
1152c5aff182SThomas Petazzoni (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1153c5aff182SThomas Petazzoni | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1154e483911fSAndrew Lunn 
1155e483911fSAndrew Lunn mvneta_mib_counters_clear(pp);
1156c5aff182SThomas Petazzoni }
1157c5aff182SThomas Petazzoni 
1158c5aff182SThomas Petazzoni /* Set max sizes for tx queues */
1159c5aff182SThomas Petazzoni static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1160c5aff182SThomas Petazzoni 
1161c5aff182SThomas Petazzoni {
1162c5aff182SThomas Petazzoni u32 val, size, mtu;
1163c5aff182SThomas Petazzoni int queue;
1164c5aff182SThomas Petazzoni 
1165c5aff182SThomas Petazzoni mtu = max_tx_size * 8;
1166c5aff182SThomas Petazzoni if (mtu > MVNETA_TX_MTU_MAX)
1167c5aff182SThomas Petazzoni mtu = MVNETA_TX_MTU_MAX;
1168c5aff182SThomas Petazzoni 
1169c5aff182SThomas Petazzoni /* Set MTU */
1170c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TX_MTU);
1171c5aff182SThomas Petazzoni val &= ~MVNETA_TX_MTU_MAX;
1172c5aff182SThomas Petazzoni val |= mtu;
1173c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TX_MTU, val);
1174c5aff182SThomas Petazzoni 
1175c5aff182SThomas Petazzoni /* TX token size and all TXQs token size must be larger than the MTU */
1176c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1177c5aff182SThomas Petazzoni 
1178c5aff182SThomas Petazzoni size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1179c5aff182SThomas Petazzoni if (size < mtu) {
1180c5aff182SThomas Petazzoni size = mtu;
1181c5aff182SThomas Petazzoni val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1182c5aff182SThomas Petazzoni val |= size;
1183c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1184c5aff182SThomas Petazzoni }
1185c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) {
1186c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1187c5aff182SThomas Petazzoni 
1188c5aff182SThomas Petazzoni size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1189c5aff182SThomas Petazzoni if (size < mtu) {
1190c5aff182SThomas Petazzoni size = mtu;
1191c5aff182SThomas Petazzoni val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1192c5aff182SThomas Petazzoni val |= size;
1193c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1194c5aff182SThomas Petazzoni }
1195c5aff182SThomas Petazzoni }
1196c5aff182SThomas Petazzoni }
1197c5aff182SThomas Petazzoni 
1198c5aff182SThomas Petazzoni /* Set unicast address */
1199c5aff182SThomas Petazzoni static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1200c5aff182SThomas Petazzoni int queue)
1201c5aff182SThomas Petazzoni {
1202c5aff182SThomas Petazzoni unsigned int unicast_reg;
1203c5aff182SThomas Petazzoni unsigned int tbl_offset;
1204c5aff182SThomas Petazzoni unsigned int reg_offset;
1205c5aff182SThomas Petazzoni 
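/* The sixteen unicast entries, indexed by the DA's last nibble, are
 * packed four per 32-bit register, hence the /4 and %4 arithmetic below.
 */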
1206c5aff182SThomas Petazzoni /* Locate the Unicast table entry */ 1207c5aff182SThomas Petazzoni last_nibble = (0xf & last_nibble); 1208c5aff182SThomas Petazzoni 1209c5aff182SThomas Petazzoni /* offset from unicast tbl base */ 1210c5aff182SThomas Petazzoni tbl_offset = (last_nibble / 4) * 4; 1211c5aff182SThomas Petazzoni 1212c5aff182SThomas Petazzoni /* offset within the above reg */ 1213c5aff182SThomas Petazzoni reg_offset = last_nibble % 4; 1214c5aff182SThomas Petazzoni 1215c5aff182SThomas Petazzoni unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); 1216c5aff182SThomas Petazzoni 1217c5aff182SThomas Petazzoni if (queue == -1) { 1218c5aff182SThomas Petazzoni /* Clear accepts frame bit at specified unicast DA tbl entry */ 1219c5aff182SThomas Petazzoni unicast_reg &= ~(0xff << (8 * reg_offset)); 1220c5aff182SThomas Petazzoni } else { 1221c5aff182SThomas Petazzoni unicast_reg &= ~(0xff << (8 * reg_offset)); 1222c5aff182SThomas Petazzoni unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1223c5aff182SThomas Petazzoni } 1224c5aff182SThomas Petazzoni 1225c5aff182SThomas Petazzoni mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); 1226c5aff182SThomas Petazzoni } 1227c5aff182SThomas Petazzoni 1228c5aff182SThomas Petazzoni /* Set mac address */ 1229c5aff182SThomas Petazzoni static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, 1230c5aff182SThomas Petazzoni int queue) 1231c5aff182SThomas Petazzoni { 1232c5aff182SThomas Petazzoni unsigned int mac_h; 1233c5aff182SThomas Petazzoni unsigned int mac_l; 1234c5aff182SThomas Petazzoni 1235c5aff182SThomas Petazzoni if (queue != -1) { 1236c5aff182SThomas Petazzoni mac_l = (addr[4] << 8) | (addr[5]); 1237c5aff182SThomas Petazzoni mac_h = (addr[0] << 24) | (addr[1] << 16) | 1238c5aff182SThomas Petazzoni (addr[2] << 8) | (addr[3] << 0); 1239c5aff182SThomas Petazzoni 1240c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); 1241c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); 1242c5aff182SThomas Petazzoni } 1243c5aff182SThomas Petazzoni 1244c5aff182SThomas Petazzoni /* Accept frames of this address */ 1245c5aff182SThomas Petazzoni mvneta_set_ucast_addr(pp, addr[5], queue); 1246c5aff182SThomas Petazzoni } 1247c5aff182SThomas Petazzoni 12486a20c175SThomas Petazzoni /* Set the number of packets that will be received before RX interrupt 12496a20c175SThomas Petazzoni * will be generated by HW. 1250c5aff182SThomas Petazzoni */ 1251c5aff182SThomas Petazzoni static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, 1252c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq, u32 value) 1253c5aff182SThomas Petazzoni { 1254c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), 1255c5aff182SThomas Petazzoni value | MVNETA_RXQ_NON_OCCUPIED(0)); 1256c5aff182SThomas Petazzoni rxq->pkts_coal = value; 1257c5aff182SThomas Petazzoni } 1258c5aff182SThomas Petazzoni 12596a20c175SThomas Petazzoni /* Set the time delay in usec before RX interrupt will be generated by 12606a20c175SThomas Petazzoni * HW. 
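 * The usec value is converted to port-clock cycles using clk_get_rate().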
1261c5aff182SThomas Petazzoni */ 1262c5aff182SThomas Petazzoni static void mvneta_rx_time_coal_set(struct mvneta_port *pp, 1263c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq, u32 value) 1264c5aff182SThomas Petazzoni { 1265189dd626SThomas Petazzoni u32 val; 1266189dd626SThomas Petazzoni unsigned long clk_rate; 1267189dd626SThomas Petazzoni 1268189dd626SThomas Petazzoni clk_rate = clk_get_rate(pp->clk); 1269189dd626SThomas Petazzoni val = (clk_rate / 1000000) * value; 1270c5aff182SThomas Petazzoni 1271c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); 1272c5aff182SThomas Petazzoni rxq->time_coal = value; 1273c5aff182SThomas Petazzoni } 1274c5aff182SThomas Petazzoni 1275c5aff182SThomas Petazzoni /* Set threshold for TX_DONE pkts coalescing */ 1276c5aff182SThomas Petazzoni static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, 1277c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq, u32 value) 1278c5aff182SThomas Petazzoni { 1279c5aff182SThomas Petazzoni u32 val; 1280c5aff182SThomas Petazzoni 1281c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); 1282c5aff182SThomas Petazzoni 1283c5aff182SThomas Petazzoni val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; 1284c5aff182SThomas Petazzoni val |= MVNETA_TXQ_SENT_THRESH_MASK(value); 1285c5aff182SThomas Petazzoni 1286c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); 1287c5aff182SThomas Petazzoni 1288c5aff182SThomas Petazzoni txq->done_pkts_coal = value; 1289c5aff182SThomas Petazzoni } 1290c5aff182SThomas Petazzoni 1291c5aff182SThomas Petazzoni /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ 1292c5aff182SThomas Petazzoni static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, 1293c5aff182SThomas Petazzoni u32 phys_addr, u32 cookie) 1294c5aff182SThomas Petazzoni { 1295c5aff182SThomas Petazzoni rx_desc->buf_cookie = cookie; 1296c5aff182SThomas Petazzoni rx_desc->buf_phys_addr = phys_addr; 1297c5aff182SThomas Petazzoni } 1298c5aff182SThomas Petazzoni 1299c5aff182SThomas Petazzoni /* Decrement sent descriptors counter */ 1300c5aff182SThomas Petazzoni static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, 1301c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq, 1302c5aff182SThomas Petazzoni int sent_desc) 1303c5aff182SThomas Petazzoni { 1304c5aff182SThomas Petazzoni u32 val; 1305c5aff182SThomas Petazzoni 1306c5aff182SThomas Petazzoni /* Only 255 TX descriptors can be updated at once */ 1307c5aff182SThomas Petazzoni while (sent_desc > 0xff) { 1308c5aff182SThomas Petazzoni val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; 1309c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1310c5aff182SThomas Petazzoni sent_desc = sent_desc - 0xff; 1311c5aff182SThomas Petazzoni } 1312c5aff182SThomas Petazzoni 1313c5aff182SThomas Petazzoni val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; 1314c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1315c5aff182SThomas Petazzoni } 1316c5aff182SThomas Petazzoni 1317c5aff182SThomas Petazzoni /* Get number of TX descriptors already sent by HW */ 1318c5aff182SThomas Petazzoni static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, 1319c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1320c5aff182SThomas Petazzoni { 1321c5aff182SThomas Petazzoni u32 val; 1322c5aff182SThomas Petazzoni int sent_desc; 1323c5aff182SThomas Petazzoni 1324c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); 1325c5aff182SThomas Petazzoni 
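/* The sent-descriptor count is a bit field of the TXQ status register;
 * mask it out and shift it down.
 */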
sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> 1326c5aff182SThomas Petazzoni MVNETA_TXQ_SENT_DESC_SHIFT; 1327c5aff182SThomas Petazzoni 1328c5aff182SThomas Petazzoni return sent_desc; 1329c5aff182SThomas Petazzoni } 1330c5aff182SThomas Petazzoni 13316a20c175SThomas Petazzoni /* Get number of sent descriptors and decrement counter. 1332c5aff182SThomas Petazzoni * The number of sent descriptors is returned. 1333c5aff182SThomas Petazzoni */ 1334c5aff182SThomas Petazzoni static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, 1335c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1336c5aff182SThomas Petazzoni { 1337c5aff182SThomas Petazzoni int sent_desc; 1338c5aff182SThomas Petazzoni 1339c5aff182SThomas Petazzoni /* Get number of sent descriptors */ 1340c5aff182SThomas Petazzoni sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); 1341c5aff182SThomas Petazzoni 1342c5aff182SThomas Petazzoni /* Decrement sent descriptors counter */ 1343c5aff182SThomas Petazzoni if (sent_desc) 1344c5aff182SThomas Petazzoni mvneta_txq_sent_desc_dec(pp, txq, sent_desc); 1345c5aff182SThomas Petazzoni 1346c5aff182SThomas Petazzoni return sent_desc; 1347c5aff182SThomas Petazzoni } 1348c5aff182SThomas Petazzoni 1349c5aff182SThomas Petazzoni /* Set TXQ descriptors fields relevant for CSUM calculation */ 1350c5aff182SThomas Petazzoni static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, 1351c5aff182SThomas Petazzoni int ip_hdr_len, int l4_proto) 1352c5aff182SThomas Petazzoni { 1353c5aff182SThomas Petazzoni u32 command; 1354c5aff182SThomas Petazzoni 1355c5aff182SThomas Petazzoni /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 13566a20c175SThomas Petazzoni * G_L4_chk, L4_type; required only for checksum 13576a20c175SThomas Petazzoni * calculation 13586a20c175SThomas Petazzoni */ 1359c5aff182SThomas Petazzoni command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1360c5aff182SThomas Petazzoni command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1361c5aff182SThomas Petazzoni 13620a198587SThomas Fitzsimmons if (l3_proto == htons(ETH_P_IP)) 1363c5aff182SThomas Petazzoni command |= MVNETA_TXD_IP_CSUM; 1364c5aff182SThomas Petazzoni else 1365c5aff182SThomas Petazzoni command |= MVNETA_TX_L3_IP6; 1366c5aff182SThomas Petazzoni 1367c5aff182SThomas Petazzoni if (l4_proto == IPPROTO_TCP) 1368c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_CSUM_FULL; 1369c5aff182SThomas Petazzoni else if (l4_proto == IPPROTO_UDP) 1370c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; 1371c5aff182SThomas Petazzoni else 1372c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_CSUM_NOT; 1373c5aff182SThomas Petazzoni 1374c5aff182SThomas Petazzoni return command; 1375c5aff182SThomas Petazzoni } 1376c5aff182SThomas Petazzoni 1377c5aff182SThomas Petazzoni 1378c5aff182SThomas Petazzoni /* Display more error info */ 1379c5aff182SThomas Petazzoni static void mvneta_rx_error(struct mvneta_port *pp, 1380c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc) 1381c5aff182SThomas Petazzoni { 1382c5aff182SThomas Petazzoni u32 status = rx_desc->status; 1383c5aff182SThomas Petazzoni 13845428213cSwilly tarreau if (!mvneta_rxq_desc_is_first_last(status)) { 1385c5aff182SThomas Petazzoni netdev_err(pp->dev, 1386c5aff182SThomas Petazzoni "bad rx status %08x (buffer oversize), size=%d\n", 13875428213cSwilly tarreau status, rx_desc->data_size); 1388c5aff182SThomas Petazzoni return; 1389c5aff182SThomas Petazzoni } 1390c5aff182SThomas Petazzoni 1391c5aff182SThomas Petazzoni switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1392c5aff182SThomas 
Petazzoni case MVNETA_RXD_ERR_CRC:
1393c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1394c5aff182SThomas Petazzoni status, rx_desc->data_size);
1395c5aff182SThomas Petazzoni break;
1396c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_OVERRUN:
1397c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1398c5aff182SThomas Petazzoni status, rx_desc->data_size);
1399c5aff182SThomas Petazzoni break;
1400c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_LEN:
1401c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1402c5aff182SThomas Petazzoni status, rx_desc->data_size);
1403c5aff182SThomas Petazzoni break;
1404c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_RESOURCE:
1405c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1406c5aff182SThomas Petazzoni status, rx_desc->data_size);
1407c5aff182SThomas Petazzoni break;
1408c5aff182SThomas Petazzoni }
1409c5aff182SThomas Petazzoni }
1410c5aff182SThomas Petazzoni 
14115428213cSwilly tarreau /* Handle RX checksum offload based on the descriptor's status */
14125428213cSwilly tarreau static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1413c5aff182SThomas Petazzoni struct sk_buff *skb)
1414c5aff182SThomas Petazzoni {
14155428213cSwilly tarreau if ((status & MVNETA_RXD_L3_IP4) &&
14165428213cSwilly tarreau (status & MVNETA_RXD_L4_CSUM_OK)) {
1417c5aff182SThomas Petazzoni skb->csum = 0;
1418c5aff182SThomas Petazzoni skb->ip_summed = CHECKSUM_UNNECESSARY;
1419c5aff182SThomas Petazzoni return;
1420c5aff182SThomas Petazzoni }
1421c5aff182SThomas Petazzoni 
1422c5aff182SThomas Petazzoni skb->ip_summed = CHECKSUM_NONE;
1423c5aff182SThomas Petazzoni }
1424c5aff182SThomas Petazzoni 
14256c498974Swilly tarreau /* Return tx queue pointer (find last set bit) according to <cause> returned
14266c498974Swilly tarreau * from tx_done reg. <cause> must not be null. The return value is always a
14276c498974Swilly tarreau * valid queue, matching the first one found in <cause>.
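 * fls() selects the most-significant set bit, so the highest-numbered
 * pending queue is serviced first.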
14286c498974Swilly tarreau */ 1429c5aff182SThomas Petazzoni static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1430c5aff182SThomas Petazzoni u32 cause) 1431c5aff182SThomas Petazzoni { 1432c5aff182SThomas Petazzoni int queue = fls(cause) - 1; 1433c5aff182SThomas Petazzoni 14346c498974Swilly tarreau return &pp->txqs[queue]; 1435c5aff182SThomas Petazzoni } 1436c5aff182SThomas Petazzoni 1437c5aff182SThomas Petazzoni /* Free tx queue skbuffs */ 1438c5aff182SThomas Petazzoni static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1439c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq, int num) 1440c5aff182SThomas Petazzoni { 1441c5aff182SThomas Petazzoni int i; 1442c5aff182SThomas Petazzoni 1443c5aff182SThomas Petazzoni for (i = 0; i < num; i++) { 1444c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc = txq->descs + 1445c5aff182SThomas Petazzoni txq->txq_get_index; 1446c5aff182SThomas Petazzoni struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; 1447c5aff182SThomas Petazzoni 1448c5aff182SThomas Petazzoni mvneta_txq_inc_get(txq); 1449c5aff182SThomas Petazzoni 14502e3173a3SEzequiel Garcia if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) 14512e3173a3SEzequiel Garcia dma_unmap_single(pp->dev->dev.parent, 14522e3173a3SEzequiel Garcia tx_desc->buf_phys_addr, 1453c5aff182SThomas Petazzoni tx_desc->data_size, DMA_TO_DEVICE); 1454ba7e46efSEzequiel Garcia if (!skb) 1455ba7e46efSEzequiel Garcia continue; 1456c5aff182SThomas Petazzoni dev_kfree_skb_any(skb); 1457c5aff182SThomas Petazzoni } 1458c5aff182SThomas Petazzoni } 1459c5aff182SThomas Petazzoni 1460c5aff182SThomas Petazzoni /* Handle end of transmission */ 1461cd713199SArnaud Ebalard static void mvneta_txq_done(struct mvneta_port *pp, 1462c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1463c5aff182SThomas Petazzoni { 1464c5aff182SThomas Petazzoni struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1465c5aff182SThomas Petazzoni int tx_done; 1466c5aff182SThomas Petazzoni 1467c5aff182SThomas Petazzoni tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1468cd713199SArnaud Ebalard if (!tx_done) 1469cd713199SArnaud Ebalard return; 1470cd713199SArnaud Ebalard 1471c5aff182SThomas Petazzoni mvneta_txq_bufs_free(pp, txq, tx_done); 1472c5aff182SThomas Petazzoni 1473c5aff182SThomas Petazzoni txq->count -= tx_done; 1474c5aff182SThomas Petazzoni 1475c5aff182SThomas Petazzoni if (netif_tx_queue_stopped(nq)) { 14768eef5f97SEzequiel Garcia if (txq->count <= txq->tx_wake_threshold) 1477c5aff182SThomas Petazzoni netif_tx_wake_queue(nq); 1478c5aff182SThomas Petazzoni } 1479c5aff182SThomas Petazzoni } 1480c5aff182SThomas Petazzoni 14818ec2cd48Swilly tarreau static void *mvneta_frag_alloc(const struct mvneta_port *pp) 14828ec2cd48Swilly tarreau { 14838ec2cd48Swilly tarreau if (likely(pp->frag_size <= PAGE_SIZE)) 14848ec2cd48Swilly tarreau return netdev_alloc_frag(pp->frag_size); 14858ec2cd48Swilly tarreau else 14868ec2cd48Swilly tarreau return kmalloc(pp->frag_size, GFP_ATOMIC); 14878ec2cd48Swilly tarreau } 14888ec2cd48Swilly tarreau 14898ec2cd48Swilly tarreau static void mvneta_frag_free(const struct mvneta_port *pp, void *data) 14908ec2cd48Swilly tarreau { 14918ec2cd48Swilly tarreau if (likely(pp->frag_size <= PAGE_SIZE)) 149213dc0d2bSAlexander Duyck skb_free_frag(data); 14938ec2cd48Swilly tarreau else 14948ec2cd48Swilly tarreau kfree(data); 14958ec2cd48Swilly tarreau } 14968ec2cd48Swilly tarreau 1497c5aff182SThomas Petazzoni /* Refill processing */ 1498c5aff182SThomas Petazzoni static int mvneta_rx_refill(struct 
mvneta_port *pp, 1499c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc) 1500c5aff182SThomas Petazzoni 1501c5aff182SThomas Petazzoni { 1502c5aff182SThomas Petazzoni dma_addr_t phys_addr; 15038ec2cd48Swilly tarreau void *data; 1504c5aff182SThomas Petazzoni 15058ec2cd48Swilly tarreau data = mvneta_frag_alloc(pp); 15068ec2cd48Swilly tarreau if (!data) 1507c5aff182SThomas Petazzoni return -ENOMEM; 1508c5aff182SThomas Petazzoni 15098ec2cd48Swilly tarreau phys_addr = dma_map_single(pp->dev->dev.parent, data, 1510c5aff182SThomas Petazzoni MVNETA_RX_BUF_SIZE(pp->pkt_size), 1511c5aff182SThomas Petazzoni DMA_FROM_DEVICE); 1512c5aff182SThomas Petazzoni if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { 15138ec2cd48Swilly tarreau mvneta_frag_free(pp, data); 1514c5aff182SThomas Petazzoni return -ENOMEM; 1515c5aff182SThomas Petazzoni } 1516c5aff182SThomas Petazzoni 15178ec2cd48Swilly tarreau mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data); 1518c5aff182SThomas Petazzoni return 0; 1519c5aff182SThomas Petazzoni } 1520c5aff182SThomas Petazzoni 1521c5aff182SThomas Petazzoni /* Handle tx checksum */ 1522c5aff182SThomas Petazzoni static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) 1523c5aff182SThomas Petazzoni { 1524c5aff182SThomas Petazzoni if (skb->ip_summed == CHECKSUM_PARTIAL) { 1525c5aff182SThomas Petazzoni int ip_hdr_len = 0; 1526817dbfa5SVlad Yasevich __be16 l3_proto = vlan_get_protocol(skb); 1527c5aff182SThomas Petazzoni u8 l4_proto; 1528c5aff182SThomas Petazzoni 1529817dbfa5SVlad Yasevich if (l3_proto == htons(ETH_P_IP)) { 1530c5aff182SThomas Petazzoni struct iphdr *ip4h = ip_hdr(skb); 1531c5aff182SThomas Petazzoni 1532c5aff182SThomas Petazzoni /* Calculate IPv4 checksum and L4 checksum */ 1533c5aff182SThomas Petazzoni ip_hdr_len = ip4h->ihl; 1534c5aff182SThomas Petazzoni l4_proto = ip4h->protocol; 1535817dbfa5SVlad Yasevich } else if (l3_proto == htons(ETH_P_IPV6)) { 1536c5aff182SThomas Petazzoni struct ipv6hdr *ip6h = ipv6_hdr(skb); 1537c5aff182SThomas Petazzoni 1538c5aff182SThomas Petazzoni /* Read l4_protocol from one of IPv6 extra headers */ 1539c5aff182SThomas Petazzoni if (skb_network_header_len(skb) > 0) 1540c5aff182SThomas Petazzoni ip_hdr_len = (skb_network_header_len(skb) >> 2); 1541c5aff182SThomas Petazzoni l4_proto = ip6h->nexthdr; 1542c5aff182SThomas Petazzoni } else 1543c5aff182SThomas Petazzoni return MVNETA_TX_L4_CSUM_NOT; 1544c5aff182SThomas Petazzoni 1545c5aff182SThomas Petazzoni return mvneta_txq_desc_csum(skb_network_offset(skb), 1546817dbfa5SVlad Yasevich l3_proto, ip_hdr_len, l4_proto); 1547c5aff182SThomas Petazzoni } 1548c5aff182SThomas Petazzoni 1549c5aff182SThomas Petazzoni return MVNETA_TX_L4_CSUM_NOT; 1550c5aff182SThomas Petazzoni } 1551c5aff182SThomas Petazzoni 1552c5aff182SThomas Petazzoni /* Drop packets received by the RXQ and free buffers */ 1553c5aff182SThomas Petazzoni static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1554c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 1555c5aff182SThomas Petazzoni { 1556c5aff182SThomas Petazzoni int rx_done, i; 1557c5aff182SThomas Petazzoni 1558c5aff182SThomas Petazzoni rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1559c5aff182SThomas Petazzoni for (i = 0; i < rxq->size; i++) { 1560c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc = rxq->descs + i; 15618ec2cd48Swilly tarreau void *data = (void *)rx_desc->buf_cookie; 1562c5aff182SThomas Petazzoni 1563c5aff182SThomas Petazzoni dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1564a328f3a0SEzequiel 
Garcia MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 15658c94ddbcSJustin Maggard mvneta_frag_free(pp, data); 1566c5aff182SThomas Petazzoni } 1567c5aff182SThomas Petazzoni 1568c5aff182SThomas Petazzoni if (rx_done) 1569c5aff182SThomas Petazzoni mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1570c5aff182SThomas Petazzoni } 1571c5aff182SThomas Petazzoni 1572c5aff182SThomas Petazzoni /* Main rx processing */ 1573c5aff182SThomas Petazzoni static int mvneta_rx(struct mvneta_port *pp, int rx_todo, 1574c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 1575c5aff182SThomas Petazzoni { 157612bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 1577c5aff182SThomas Petazzoni struct net_device *dev = pp->dev; 1578a84e3289SSimon Guinot int rx_done; 1579dc4277ddSwilly tarreau u32 rcvd_pkts = 0; 1580dc4277ddSwilly tarreau u32 rcvd_bytes = 0; 1581c5aff182SThomas Petazzoni 1582c5aff182SThomas Petazzoni /* Get number of received packets */ 1583c5aff182SThomas Petazzoni rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1584c5aff182SThomas Petazzoni 1585c5aff182SThomas Petazzoni if (rx_todo > rx_done) 1586c5aff182SThomas Petazzoni rx_todo = rx_done; 1587c5aff182SThomas Petazzoni 1588c5aff182SThomas Petazzoni rx_done = 0; 1589c5aff182SThomas Petazzoni 1590c5aff182SThomas Petazzoni /* Fairness NAPI loop */ 1591c5aff182SThomas Petazzoni while (rx_done < rx_todo) { 1592c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1593c5aff182SThomas Petazzoni struct sk_buff *skb; 15948ec2cd48Swilly tarreau unsigned char *data; 1595daf158d0SSimon Guinot dma_addr_t phys_addr; 1596c5aff182SThomas Petazzoni u32 rx_status; 1597c5aff182SThomas Petazzoni int rx_bytes, err; 1598c5aff182SThomas Petazzoni 1599c5aff182SThomas Petazzoni rx_done++; 1600c5aff182SThomas Petazzoni rx_status = rx_desc->status; 1601f19fadfcSwilly tarreau rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 16028ec2cd48Swilly tarreau data = (unsigned char *)rx_desc->buf_cookie; 1603daf158d0SSimon Guinot phys_addr = rx_desc->buf_phys_addr; 1604c5aff182SThomas Petazzoni 16055428213cSwilly tarreau if (!mvneta_rxq_desc_is_first_last(rx_status) || 1606f19fadfcSwilly tarreau (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1607f19fadfcSwilly tarreau err_drop_frame: 1608c5aff182SThomas Petazzoni dev->stats.rx_errors++; 1609c5aff182SThomas Petazzoni mvneta_rx_error(pp, rx_desc); 16108ec2cd48Swilly tarreau /* leave the descriptor untouched */ 1611c5aff182SThomas Petazzoni continue; 1612c5aff182SThomas Petazzoni } 1613c5aff182SThomas Petazzoni 1614f19fadfcSwilly tarreau if (rx_bytes <= rx_copybreak) { 1615f19fadfcSwilly tarreau /* better copy a small frame and not unmap the DMA region */ 1616f19fadfcSwilly tarreau skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 1617f19fadfcSwilly tarreau if (unlikely(!skb)) 1618f19fadfcSwilly tarreau goto err_drop_frame; 1619f19fadfcSwilly tarreau 1620f19fadfcSwilly tarreau dma_sync_single_range_for_cpu(dev->dev.parent, 1621f19fadfcSwilly tarreau rx_desc->buf_phys_addr, 1622f19fadfcSwilly tarreau MVNETA_MH_SIZE + NET_SKB_PAD, 1623f19fadfcSwilly tarreau rx_bytes, 1624f19fadfcSwilly tarreau DMA_FROM_DEVICE); 1625f19fadfcSwilly tarreau memcpy(skb_put(skb, rx_bytes), 1626f19fadfcSwilly tarreau data + MVNETA_MH_SIZE + NET_SKB_PAD, 1627f19fadfcSwilly tarreau rx_bytes); 1628f19fadfcSwilly tarreau 1629f19fadfcSwilly tarreau skb->protocol = eth_type_trans(skb, dev); 1630f19fadfcSwilly tarreau mvneta_rx_csum(pp, rx_status, skb); 163112bb03b4SMaxime Ripard 
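/* Copybreak path: the frame was memcpy'd into a fresh skb above, so the
 * original RX buffer stays mapped and is left for the hardware to reuse.
 */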
napi_gro_receive(&port->napi, skb);
1632f19fadfcSwilly tarreau 
1633f19fadfcSwilly tarreau rcvd_pkts++;
1634f19fadfcSwilly tarreau rcvd_bytes += rx_bytes;
1635f19fadfcSwilly tarreau 
1636f19fadfcSwilly tarreau /* leave the descriptor and buffer untouched */
1637f19fadfcSwilly tarreau continue;
1638f19fadfcSwilly tarreau }
1639f19fadfcSwilly tarreau 
1640a84e3289SSimon Guinot /* Refill processing */
1641a84e3289SSimon Guinot err = mvneta_rx_refill(pp, rx_desc);
1642a84e3289SSimon Guinot if (err) {
1643a84e3289SSimon Guinot netdev_err(dev, "Linux processing - Can't refill\n");
1644a84e3289SSimon Guinot rxq->missed++;
1645a84e3289SSimon Guinot goto err_drop_frame;
1646a84e3289SSimon Guinot }
1647a84e3289SSimon Guinot 
1648f19fadfcSwilly tarreau skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1649f19fadfcSwilly tarreau 
165026c17a17SMarcin Wojtas /* After refill, the old buffer has to be unmapped regardless
165126c17a17SMarcin Wojtas * of whether the skb was successfully built or not.
165226c17a17SMarcin Wojtas */
1653daf158d0SSimon Guinot dma_unmap_single(dev->dev.parent, phys_addr,
1654a328f3a0SEzequiel Garcia MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1655c5aff182SThomas Petazzoni 
165626c17a17SMarcin Wojtas if (!skb)
165726c17a17SMarcin Wojtas goto err_drop_frame;
165826c17a17SMarcin Wojtas 
1659dc4277ddSwilly tarreau rcvd_pkts++;
1660dc4277ddSwilly tarreau rcvd_bytes += rx_bytes;
1661c5aff182SThomas Petazzoni 
1662c5aff182SThomas Petazzoni /* Linux processing */
16638ec2cd48Swilly tarreau skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1664c5aff182SThomas Petazzoni skb_put(skb, rx_bytes);
1665c5aff182SThomas Petazzoni 
1666c5aff182SThomas Petazzoni skb->protocol = eth_type_trans(skb, dev);
1667c5aff182SThomas Petazzoni 
16685428213cSwilly tarreau mvneta_rx_csum(pp, rx_status, skb);
1669c5aff182SThomas Petazzoni 
167012bb03b4SMaxime Ripard napi_gro_receive(&port->napi, skb);
1671c5aff182SThomas Petazzoni }
1672c5aff182SThomas Petazzoni 
1673dc4277ddSwilly tarreau if (rcvd_pkts) {
167474c41b04Swilly tarreau struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
167574c41b04Swilly tarreau 
167674c41b04Swilly tarreau u64_stats_update_begin(&stats->syncp);
167774c41b04Swilly tarreau stats->rx_packets += rcvd_pkts;
167874c41b04Swilly tarreau stats->rx_bytes += rcvd_bytes;
167974c41b04Swilly tarreau u64_stats_update_end(&stats->syncp);
1680dc4277ddSwilly tarreau }
1681dc4277ddSwilly tarreau 
1682c5aff182SThomas Petazzoni /* Update rxq management counters */
1683a84e3289SSimon Guinot mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1684c5aff182SThomas Petazzoni 
1685c5aff182SThomas Petazzoni return rx_done;
1686c5aff182SThomas Petazzoni }
1687c5aff182SThomas Petazzoni 
16882adb719dSEzequiel Garcia static inline void
16892adb719dSEzequiel Garcia mvneta_tso_put_hdr(struct sk_buff *skb,
16902adb719dSEzequiel Garcia struct mvneta_port *pp, struct mvneta_tx_queue *txq)
16912adb719dSEzequiel Garcia {
16922adb719dSEzequiel Garcia struct mvneta_tx_desc *tx_desc;
16932adb719dSEzequiel Garcia int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
16942adb719dSEzequiel Garcia 
16952adb719dSEzequiel Garcia txq->tx_skb[txq->txq_put_index] = NULL;
16962adb719dSEzequiel Garcia tx_desc = mvneta_txq_next_desc_get(txq);
16972adb719dSEzequiel Garcia tx_desc->data_size = hdr_len;
16982adb719dSEzequiel Garcia tx_desc->command = mvneta_skb_tx_csum(pp, skb);
16992adb719dSEzequiel Garcia tx_desc->command |= MVNETA_TXD_F_DESC;
17002adb719dSEzequiel Garcia tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
17012adb719dSEzequiel Garcia txq->txq_put_index * TSO_HEADER_SIZE; 17022adb719dSEzequiel Garcia mvneta_txq_inc_put(txq); 17032adb719dSEzequiel Garcia } 17042adb719dSEzequiel Garcia 17052adb719dSEzequiel Garcia static inline int 17062adb719dSEzequiel Garcia mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 17072adb719dSEzequiel Garcia struct sk_buff *skb, char *data, int size, 17082adb719dSEzequiel Garcia bool last_tcp, bool is_last) 17092adb719dSEzequiel Garcia { 17102adb719dSEzequiel Garcia struct mvneta_tx_desc *tx_desc; 17112adb719dSEzequiel Garcia 17122adb719dSEzequiel Garcia tx_desc = mvneta_txq_next_desc_get(txq); 17132adb719dSEzequiel Garcia tx_desc->data_size = size; 17142adb719dSEzequiel Garcia tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, 17152adb719dSEzequiel Garcia size, DMA_TO_DEVICE); 17162adb719dSEzequiel Garcia if (unlikely(dma_mapping_error(dev->dev.parent, 17172adb719dSEzequiel Garcia tx_desc->buf_phys_addr))) { 17182adb719dSEzequiel Garcia mvneta_txq_desc_put(txq); 17192adb719dSEzequiel Garcia return -ENOMEM; 17202adb719dSEzequiel Garcia } 17212adb719dSEzequiel Garcia 17222adb719dSEzequiel Garcia tx_desc->command = 0; 17232adb719dSEzequiel Garcia txq->tx_skb[txq->txq_put_index] = NULL; 17242adb719dSEzequiel Garcia 17252adb719dSEzequiel Garcia if (last_tcp) { 17262adb719dSEzequiel Garcia /* last descriptor in the TCP packet */ 17272adb719dSEzequiel Garcia tx_desc->command = MVNETA_TXD_L_DESC; 17282adb719dSEzequiel Garcia 17292adb719dSEzequiel Garcia /* last descriptor in SKB */ 17302adb719dSEzequiel Garcia if (is_last) 17312adb719dSEzequiel Garcia txq->tx_skb[txq->txq_put_index] = skb; 17322adb719dSEzequiel Garcia } 17332adb719dSEzequiel Garcia mvneta_txq_inc_put(txq); 17342adb719dSEzequiel Garcia return 0; 17352adb719dSEzequiel Garcia } 17362adb719dSEzequiel Garcia 17372adb719dSEzequiel Garcia static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 17382adb719dSEzequiel Garcia struct mvneta_tx_queue *txq) 17392adb719dSEzequiel Garcia { 17402adb719dSEzequiel Garcia int total_len, data_left; 17412adb719dSEzequiel Garcia int desc_count = 0; 17422adb719dSEzequiel Garcia struct mvneta_port *pp = netdev_priv(dev); 17432adb719dSEzequiel Garcia struct tso_t tso; 17442adb719dSEzequiel Garcia int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 17452adb719dSEzequiel Garcia int i; 17462adb719dSEzequiel Garcia 17472adb719dSEzequiel Garcia /* Count needed descriptors */ 17482adb719dSEzequiel Garcia if ((txq->count + tso_count_descs(skb)) >= txq->size) 17492adb719dSEzequiel Garcia return 0; 17502adb719dSEzequiel Garcia 17512adb719dSEzequiel Garcia if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { 17522adb719dSEzequiel Garcia pr_info("*** Is this even possible???!?!?\n"); 17532adb719dSEzequiel Garcia return 0; 17542adb719dSEzequiel Garcia } 17552adb719dSEzequiel Garcia 17562adb719dSEzequiel Garcia /* Initialize the TSO handler, and prepare the first payload */ 17572adb719dSEzequiel Garcia tso_start(skb, &tso); 17582adb719dSEzequiel Garcia 17592adb719dSEzequiel Garcia total_len = skb->len - hdr_len; 17602adb719dSEzequiel Garcia while (total_len > 0) { 17612adb719dSEzequiel Garcia char *hdr; 17622adb719dSEzequiel Garcia 17632adb719dSEzequiel Garcia data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 17642adb719dSEzequiel Garcia total_len -= data_left; 17652adb719dSEzequiel Garcia desc_count++; 17662adb719dSEzequiel Garcia 17672adb719dSEzequiel Garcia /* prepare packet headers: MAC + IP + TCP */ 
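/* Headers are built in the per-queue tso_hdrs buffer (DMA address
 * tso_hdrs_phys, used by mvneta_tso_put_hdr() above), one
 * TSO_HEADER_SIZE slot per descriptor index.
 */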
17682adb719dSEzequiel Garcia hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; 17692adb719dSEzequiel Garcia tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 17702adb719dSEzequiel Garcia 17712adb719dSEzequiel Garcia mvneta_tso_put_hdr(skb, pp, txq); 17722adb719dSEzequiel Garcia 17732adb719dSEzequiel Garcia while (data_left > 0) { 17742adb719dSEzequiel Garcia int size; 17752adb719dSEzequiel Garcia desc_count++; 17762adb719dSEzequiel Garcia 17772adb719dSEzequiel Garcia size = min_t(int, tso.size, data_left); 17782adb719dSEzequiel Garcia 17792adb719dSEzequiel Garcia if (mvneta_tso_put_data(dev, txq, skb, 17802adb719dSEzequiel Garcia tso.data, size, 17812adb719dSEzequiel Garcia size == data_left, 17822adb719dSEzequiel Garcia total_len == 0)) 17832adb719dSEzequiel Garcia goto err_release; 17842adb719dSEzequiel Garcia data_left -= size; 17852adb719dSEzequiel Garcia 17862adb719dSEzequiel Garcia tso_build_data(skb, &tso, size); 17872adb719dSEzequiel Garcia } 17882adb719dSEzequiel Garcia } 17892adb719dSEzequiel Garcia 17902adb719dSEzequiel Garcia return desc_count; 17912adb719dSEzequiel Garcia 17922adb719dSEzequiel Garcia err_release: 17932adb719dSEzequiel Garcia /* Release all used data descriptors; header descriptors must not 17942adb719dSEzequiel Garcia * be DMA-unmapped. 17952adb719dSEzequiel Garcia */ 17962adb719dSEzequiel Garcia for (i = desc_count - 1; i >= 0; i--) { 17972adb719dSEzequiel Garcia struct mvneta_tx_desc *tx_desc = txq->descs + i; 17982e3173a3SEzequiel Garcia if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) 17992adb719dSEzequiel Garcia dma_unmap_single(pp->dev->dev.parent, 18002adb719dSEzequiel Garcia tx_desc->buf_phys_addr, 18012adb719dSEzequiel Garcia tx_desc->data_size, 18022adb719dSEzequiel Garcia DMA_TO_DEVICE); 18032adb719dSEzequiel Garcia mvneta_txq_desc_put(txq); 18042adb719dSEzequiel Garcia } 18052adb719dSEzequiel Garcia return 0; 18062adb719dSEzequiel Garcia } 18072adb719dSEzequiel Garcia 1808c5aff182SThomas Petazzoni /* Handle tx fragmentation processing */ 1809c5aff182SThomas Petazzoni static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 1810c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1811c5aff182SThomas Petazzoni { 1812c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc; 18133d4ea02fSEzequiel Garcia int i, nr_frags = skb_shinfo(skb)->nr_frags; 1814c5aff182SThomas Petazzoni 18153d4ea02fSEzequiel Garcia for (i = 0; i < nr_frags; i++) { 1816c5aff182SThomas Petazzoni skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1817c5aff182SThomas Petazzoni void *addr = page_address(frag->page.p) + frag->page_offset; 1818c5aff182SThomas Petazzoni 1819c5aff182SThomas Petazzoni tx_desc = mvneta_txq_next_desc_get(txq); 1820c5aff182SThomas Petazzoni tx_desc->data_size = frag->size; 1821c5aff182SThomas Petazzoni 1822c5aff182SThomas Petazzoni tx_desc->buf_phys_addr = 1823c5aff182SThomas Petazzoni dma_map_single(pp->dev->dev.parent, addr, 1824c5aff182SThomas Petazzoni tx_desc->data_size, DMA_TO_DEVICE); 1825c5aff182SThomas Petazzoni 1826c5aff182SThomas Petazzoni if (dma_mapping_error(pp->dev->dev.parent, 1827c5aff182SThomas Petazzoni tx_desc->buf_phys_addr)) { 1828c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 1829c5aff182SThomas Petazzoni goto error; 1830c5aff182SThomas Petazzoni } 1831c5aff182SThomas Petazzoni 18323d4ea02fSEzequiel Garcia if (i == nr_frags - 1) { 1833c5aff182SThomas Petazzoni /* Last descriptor */ 1834c5aff182SThomas Petazzoni tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 1835c5aff182SThomas 
Petazzoni txq->tx_skb[txq->txq_put_index] = skb; 1836c5aff182SThomas Petazzoni } else { 1837c5aff182SThomas Petazzoni /* Descriptor in the middle: Not First, Not Last */ 1838c5aff182SThomas Petazzoni tx_desc->command = 0; 1839c5aff182SThomas Petazzoni txq->tx_skb[txq->txq_put_index] = NULL; 1840c5aff182SThomas Petazzoni } 18413d4ea02fSEzequiel Garcia mvneta_txq_inc_put(txq); 1842c5aff182SThomas Petazzoni } 1843c5aff182SThomas Petazzoni 1844c5aff182SThomas Petazzoni return 0; 1845c5aff182SThomas Petazzoni 1846c5aff182SThomas Petazzoni error: 1847c5aff182SThomas Petazzoni /* Release all descriptors that were used to map fragments of 18486a20c175SThomas Petazzoni * this packet, as well as the corresponding DMA mappings 18496a20c175SThomas Petazzoni */ 1850c5aff182SThomas Petazzoni for (i = i - 1; i >= 0; i--) { 1851c5aff182SThomas Petazzoni tx_desc = txq->descs + i; 1852c5aff182SThomas Petazzoni dma_unmap_single(pp->dev->dev.parent, 1853c5aff182SThomas Petazzoni tx_desc->buf_phys_addr, 1854c5aff182SThomas Petazzoni tx_desc->data_size, 1855c5aff182SThomas Petazzoni DMA_TO_DEVICE); 1856c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 1857c5aff182SThomas Petazzoni } 1858c5aff182SThomas Petazzoni 1859c5aff182SThomas Petazzoni return -ENOMEM; 1860c5aff182SThomas Petazzoni } 1861c5aff182SThomas Petazzoni 1862c5aff182SThomas Petazzoni /* Main tx processing */ 1863c5aff182SThomas Petazzoni static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) 1864c5aff182SThomas Petazzoni { 1865c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 1866ee40a116SWilly Tarreau u16 txq_id = skb_get_queue_mapping(skb); 1867ee40a116SWilly Tarreau struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 1868c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc; 18695f478b41SEric Dumazet int len = skb->len; 1870c5aff182SThomas Petazzoni int frags = 0; 1871c5aff182SThomas Petazzoni u32 tx_cmd; 1872c5aff182SThomas Petazzoni 1873c5aff182SThomas Petazzoni if (!netif_running(dev)) 1874c5aff182SThomas Petazzoni goto out; 1875c5aff182SThomas Petazzoni 18762adb719dSEzequiel Garcia if (skb_is_gso(skb)) { 18772adb719dSEzequiel Garcia frags = mvneta_tx_tso(skb, dev, txq); 18782adb719dSEzequiel Garcia goto out; 18792adb719dSEzequiel Garcia } 18802adb719dSEzequiel Garcia 1881c5aff182SThomas Petazzoni frags = skb_shinfo(skb)->nr_frags + 1; 1882c5aff182SThomas Petazzoni 1883c5aff182SThomas Petazzoni /* Get a descriptor for the first part of the packet */ 1884c5aff182SThomas Petazzoni tx_desc = mvneta_txq_next_desc_get(txq); 1885c5aff182SThomas Petazzoni 1886c5aff182SThomas Petazzoni tx_cmd = mvneta_skb_tx_csum(pp, skb); 1887c5aff182SThomas Petazzoni 1888c5aff182SThomas Petazzoni tx_desc->data_size = skb_headlen(skb); 1889c5aff182SThomas Petazzoni 1890c5aff182SThomas Petazzoni tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 1891c5aff182SThomas Petazzoni tx_desc->data_size, 1892c5aff182SThomas Petazzoni DMA_TO_DEVICE); 1893c5aff182SThomas Petazzoni if (unlikely(dma_mapping_error(dev->dev.parent, 1894c5aff182SThomas Petazzoni tx_desc->buf_phys_addr))) { 1895c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 1896c5aff182SThomas Petazzoni frags = 0; 1897c5aff182SThomas Petazzoni goto out; 1898c5aff182SThomas Petazzoni } 1899c5aff182SThomas Petazzoni 1900c5aff182SThomas Petazzoni if (frags == 1) { 1901c5aff182SThomas Petazzoni /* First and Last descriptor */ 1902c5aff182SThomas Petazzoni tx_cmd |= MVNETA_TXD_FLZ_DESC; 1903c5aff182SThomas Petazzoni tx_desc->command = tx_cmd; 1904c5aff182SThomas 
Petazzoni txq->tx_skb[txq->txq_put_index] = skb; 1905c5aff182SThomas Petazzoni mvneta_txq_inc_put(txq); 1906c5aff182SThomas Petazzoni } else { 1907c5aff182SThomas Petazzoni /* First but not Last */ 1908c5aff182SThomas Petazzoni tx_cmd |= MVNETA_TXD_F_DESC; 1909c5aff182SThomas Petazzoni txq->tx_skb[txq->txq_put_index] = NULL; 1910c5aff182SThomas Petazzoni mvneta_txq_inc_put(txq); 1911c5aff182SThomas Petazzoni tx_desc->command = tx_cmd; 1912c5aff182SThomas Petazzoni /* Continue with other skb fragments */ 1913c5aff182SThomas Petazzoni if (mvneta_tx_frag_process(pp, skb, txq)) { 1914c5aff182SThomas Petazzoni dma_unmap_single(dev->dev.parent, 1915c5aff182SThomas Petazzoni tx_desc->buf_phys_addr, 1916c5aff182SThomas Petazzoni tx_desc->data_size, 1917c5aff182SThomas Petazzoni DMA_TO_DEVICE); 1918c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 1919c5aff182SThomas Petazzoni frags = 0; 1920c5aff182SThomas Petazzoni goto out; 1921c5aff182SThomas Petazzoni } 1922c5aff182SThomas Petazzoni } 1923c5aff182SThomas Petazzoni 1924e19d2ddaSEzequiel Garcia out: 1925e19d2ddaSEzequiel Garcia if (frags > 0) { 1926e19d2ddaSEzequiel Garcia struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1927e19d2ddaSEzequiel Garcia struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 1928e19d2ddaSEzequiel Garcia 1929c5aff182SThomas Petazzoni txq->count += frags; 1930c5aff182SThomas Petazzoni mvneta_txq_pend_desc_add(pp, txq, frags); 1931c5aff182SThomas Petazzoni 19328eef5f97SEzequiel Garcia if (txq->count >= txq->tx_stop_threshold) 1933c5aff182SThomas Petazzoni netif_tx_stop_queue(nq); 1934c5aff182SThomas Petazzoni 193574c41b04Swilly tarreau u64_stats_update_begin(&stats->syncp); 193674c41b04Swilly tarreau stats->tx_packets++; 19375f478b41SEric Dumazet stats->tx_bytes += len; 193874c41b04Swilly tarreau u64_stats_update_end(&stats->syncp); 1939c5aff182SThomas Petazzoni } else { 1940c5aff182SThomas Petazzoni dev->stats.tx_dropped++; 1941c5aff182SThomas Petazzoni dev_kfree_skb_any(skb); 1942c5aff182SThomas Petazzoni } 1943c5aff182SThomas Petazzoni 1944c5aff182SThomas Petazzoni return NETDEV_TX_OK; 1945c5aff182SThomas Petazzoni } 1946c5aff182SThomas Petazzoni 1947c5aff182SThomas Petazzoni 1948c5aff182SThomas Petazzoni /* Free tx resources, when resetting a port */ 1949c5aff182SThomas Petazzoni static void mvneta_txq_done_force(struct mvneta_port *pp, 1950c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1951c5aff182SThomas Petazzoni 1952c5aff182SThomas Petazzoni { 1953c5aff182SThomas Petazzoni int tx_done = txq->count; 1954c5aff182SThomas Petazzoni 1955c5aff182SThomas Petazzoni mvneta_txq_bufs_free(pp, txq, tx_done); 1956c5aff182SThomas Petazzoni 1957c5aff182SThomas Petazzoni /* reset txq */ 1958c5aff182SThomas Petazzoni txq->count = 0; 1959c5aff182SThomas Petazzoni txq->txq_put_index = 0; 1960c5aff182SThomas Petazzoni txq->txq_get_index = 0; 1961c5aff182SThomas Petazzoni } 1962c5aff182SThomas Petazzoni 19636c498974Swilly tarreau /* Handle tx done - called in softirq context. The <cause_tx_done> argument 19646c498974Swilly tarreau * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 
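 * Queues are drained highest-numbered first, as selected by
 * mvneta_tx_done_policy().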
19656c498974Swilly tarreau */
19660713a86aSArnaud Ebalard static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1967c5aff182SThomas Petazzoni {
1968c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq;
1969c5aff182SThomas Petazzoni struct netdev_queue *nq;
1970c5aff182SThomas Petazzoni 
19716c498974Swilly tarreau while (cause_tx_done) {
1972c5aff182SThomas Petazzoni txq = mvneta_tx_done_policy(pp, cause_tx_done);
1973c5aff182SThomas Petazzoni 
1974c5aff182SThomas Petazzoni nq = netdev_get_tx_queue(pp->dev, txq->id);
1975c5aff182SThomas Petazzoni __netif_tx_lock(nq, smp_processor_id());
1976c5aff182SThomas Petazzoni 
19770713a86aSArnaud Ebalard if (txq->count)
19780713a86aSArnaud Ebalard mvneta_txq_done(pp, txq);
1979c5aff182SThomas Petazzoni 
1980c5aff182SThomas Petazzoni __netif_tx_unlock(nq);
1981c5aff182SThomas Petazzoni cause_tx_done &= ~((1 << txq->id));
1982c5aff182SThomas Petazzoni }
1983c5aff182SThomas Petazzoni }
1984c5aff182SThomas Petazzoni 
19856a20c175SThomas Petazzoni /* Compute crc8 of the specified address, using a unique algorithm,
1986c5aff182SThomas Petazzoni * according to the hw spec, different from the generic crc8 algorithm
1987c5aff182SThomas Petazzoni */
1988c5aff182SThomas Petazzoni static int mvneta_addr_crc(unsigned char *addr)
1989c5aff182SThomas Petazzoni {
1990c5aff182SThomas Petazzoni int crc = 0;
1991c5aff182SThomas Petazzoni int i;
1992c5aff182SThomas Petazzoni 
1993c5aff182SThomas Petazzoni for (i = 0; i < ETH_ALEN; i++) {
1994c5aff182SThomas Petazzoni int j;
1995c5aff182SThomas Petazzoni 
1996c5aff182SThomas Petazzoni crc = (crc ^ addr[i]) << 8;
1997c5aff182SThomas Petazzoni for (j = 7; j >= 0; j--) {
1998c5aff182SThomas Petazzoni if (crc & (0x100 << j))
1999c5aff182SThomas Petazzoni crc ^= 0x107 << j;
2000c5aff182SThomas Petazzoni }
2001c5aff182SThomas Petazzoni }
2002c5aff182SThomas Petazzoni 
2003c5aff182SThomas Petazzoni return crc;
2004c5aff182SThomas Petazzoni }
2005c5aff182SThomas Petazzoni 
2006c5aff182SThomas Petazzoni /* This method controls the net device special MAC multicast support.
2007c5aff182SThomas Petazzoni * The Special Multicast Table for MAC addresses supports MAC of the form
2008c5aff182SThomas Petazzoni * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2009c5aff182SThomas Petazzoni * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2010c5aff182SThomas Petazzoni * Table entries in the DA-Filter table. This method sets the
2011c5aff182SThomas Petazzoni * appropriate Special Multicast Table entry.
2012c5aff182SThomas Petazzoni */
2013c5aff182SThomas Petazzoni static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2014c5aff182SThomas Petazzoni unsigned char last_byte,
2015c5aff182SThomas Petazzoni int queue)
2016c5aff182SThomas Petazzoni {
2017c5aff182SThomas Petazzoni unsigned int smc_table_reg;
2018c5aff182SThomas Petazzoni unsigned int tbl_offset;
2019c5aff182SThomas Petazzoni unsigned int reg_offset;
2020c5aff182SThomas Petazzoni 
2021c5aff182SThomas Petazzoni /* Register offset from SMC table base */
2022c5aff182SThomas Petazzoni tbl_offset = (last_byte / 4);
2023c5aff182SThomas Petazzoni /* Entry offset within the above reg */
2024c5aff182SThomas Petazzoni reg_offset = last_byte % 4;
2025c5aff182SThomas Petazzoni 
2026c5aff182SThomas Petazzoni smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2027c5aff182SThomas Petazzoni + tbl_offset * 4));
2028c5aff182SThomas Petazzoni 
2029c5aff182SThomas Petazzoni if (queue == -1)
2030c5aff182SThomas Petazzoni smc_table_reg &= ~(0xff << (8 * reg_offset));
2031c5aff182SThomas Petazzoni else {
2032c5aff182SThomas Petazzoni smc_table_reg &= ~(0xff << (8 * reg_offset));
2033c5aff182SThomas Petazzoni smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2034c5aff182SThomas Petazzoni }
2035c5aff182SThomas Petazzoni 
2036c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2037c5aff182SThomas Petazzoni smc_table_reg);
2038c5aff182SThomas Petazzoni }
2039c5aff182SThomas Petazzoni 
2040c5aff182SThomas Petazzoni /* This method controls the network device Other MAC multicast support.
2041c5aff182SThomas Petazzoni * The Other Multicast Table is used for multicast of another type.
2042c5aff182SThomas Petazzoni * A CRC-8 is used as an index to the Other Multicast Table entries
2043c5aff182SThomas Petazzoni * in the DA-Filter table.
2044c5aff182SThomas Petazzoni * The method gets the CRC-8 value from the calling routine and
2045c5aff182SThomas Petazzoni * sets the appropriate Other Multicast Table entry according to the
2046c5aff182SThomas Petazzoni * specified CRC-8.
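 * (mvneta_addr_crc() above computes this CRC-8 with polynomial 0x107,
 * i.e. x^8 + x^2 + x + 1.)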
2047c5aff182SThomas Petazzoni */ 2048c5aff182SThomas Petazzoni static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 2049c5aff182SThomas Petazzoni unsigned char crc8, 2050c5aff182SThomas Petazzoni int queue) 2051c5aff182SThomas Petazzoni { 2052c5aff182SThomas Petazzoni unsigned int omc_table_reg; 2053c5aff182SThomas Petazzoni unsigned int tbl_offset; 2054c5aff182SThomas Petazzoni unsigned int reg_offset; 2055c5aff182SThomas Petazzoni 2056c5aff182SThomas Petazzoni tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 2057c5aff182SThomas Petazzoni reg_offset = crc8 % 4; /* Entry offset within the above reg */ 2058c5aff182SThomas Petazzoni 2059c5aff182SThomas Petazzoni omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 2060c5aff182SThomas Petazzoni 2061c5aff182SThomas Petazzoni if (queue == -1) { 2062c5aff182SThomas Petazzoni /* Clear accepts frame bit at specified Other DA table entry */ 2063c5aff182SThomas Petazzoni omc_table_reg &= ~(0xff << (8 * reg_offset)); 2064c5aff182SThomas Petazzoni } else { 2065c5aff182SThomas Petazzoni omc_table_reg &= ~(0xff << (8 * reg_offset)); 2066c5aff182SThomas Petazzoni omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 2067c5aff182SThomas Petazzoni } 2068c5aff182SThomas Petazzoni 2069c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 2070c5aff182SThomas Petazzoni } 2071c5aff182SThomas Petazzoni 2072c5aff182SThomas Petazzoni /* The network device supports multicast using two tables: 2073c5aff182SThomas Petazzoni * 1) Special Multicast Table for MAC addresses of the form 2074c5aff182SThomas Petazzoni * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 2075c5aff182SThomas Petazzoni * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 2076c5aff182SThomas Petazzoni * Table entries in the DA-Filter table. 2077c5aff182SThomas Petazzoni * 2) Other Multicast Table for multicast of another type. A CRC-8 value 2078c5aff182SThomas Petazzoni * is used as an index to the Other Multicast Table entries in the 2079c5aff182SThomas Petazzoni * DA-Filter table. 
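 * Both tables use the same per-entry encoding: bit 0 enables reception
 * and bits [3:1] select the destination RX queue.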
2080c5aff182SThomas Petazzoni */
2081c5aff182SThomas Petazzoni static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2082c5aff182SThomas Petazzoni int queue)
2083c5aff182SThomas Petazzoni {
2084c5aff182SThomas Petazzoni unsigned char crc_result = 0;
2085c5aff182SThomas Petazzoni 
2086c5aff182SThomas Petazzoni if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2087c5aff182SThomas Petazzoni mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2088c5aff182SThomas Petazzoni return 0;
2089c5aff182SThomas Petazzoni }
2090c5aff182SThomas Petazzoni 
2091c5aff182SThomas Petazzoni crc_result = mvneta_addr_crc(p_addr);
2092c5aff182SThomas Petazzoni if (queue == -1) {
2093c5aff182SThomas Petazzoni if (pp->mcast_count[crc_result] == 0) {
2094c5aff182SThomas Petazzoni netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2095c5aff182SThomas Petazzoni crc_result);
2096c5aff182SThomas Petazzoni return -EINVAL;
2097c5aff182SThomas Petazzoni }
2098c5aff182SThomas Petazzoni 
2099c5aff182SThomas Petazzoni pp->mcast_count[crc_result]--;
2100c5aff182SThomas Petazzoni if (pp->mcast_count[crc_result] != 0) {
2101c5aff182SThomas Petazzoni netdev_info(pp->dev,
2102c5aff182SThomas Petazzoni "After delete there are %d valid Mcast for crc8=0x%02x\n",
2103c5aff182SThomas Petazzoni pp->mcast_count[crc_result], crc_result);
2104c5aff182SThomas Petazzoni return -EINVAL;
2105c5aff182SThomas Petazzoni }
2106c5aff182SThomas Petazzoni } else
2107c5aff182SThomas Petazzoni pp->mcast_count[crc_result]++;
2108c5aff182SThomas Petazzoni 
2109c5aff182SThomas Petazzoni mvneta_set_other_mcast_addr(pp, crc_result, queue);
2110c5aff182SThomas Petazzoni 
2111c5aff182SThomas Petazzoni return 0;
2112c5aff182SThomas Petazzoni }
2113c5aff182SThomas Petazzoni 
2114c5aff182SThomas Petazzoni /* Configure Filtering mode of Ethernet port */
2115c5aff182SThomas Petazzoni static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2116c5aff182SThomas Petazzoni int is_promisc)
2117c5aff182SThomas Petazzoni {
2118c5aff182SThomas Petazzoni u32 port_cfg_reg, val;
2119c5aff182SThomas Petazzoni 
2120c5aff182SThomas Petazzoni port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2121c5aff182SThomas Petazzoni 
2122c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2123c5aff182SThomas Petazzoni 
2124c5aff182SThomas Petazzoni /* Set / Clear UPM bit in port configuration register */
2125c5aff182SThomas Petazzoni if (is_promisc) {
2126c5aff182SThomas Petazzoni /* Accept all Unicast addresses */
2127c5aff182SThomas Petazzoni port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2128c5aff182SThomas Petazzoni val |= MVNETA_FORCE_UNI;
2129c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2130c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2131c5aff182SThomas Petazzoni } else {
2132c5aff182SThomas Petazzoni /* Reject all Unicast addresses */
2133c5aff182SThomas Petazzoni port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2134c5aff182SThomas Petazzoni val &= ~MVNETA_FORCE_UNI;
2135c5aff182SThomas Petazzoni }
2136c5aff182SThomas Petazzoni 
2137c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2138c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2139c5aff182SThomas Petazzoni }
2140c5aff182SThomas Petazzoni 
2141c5aff182SThomas Petazzoni /* register unicast and multicast addresses */
2142c5aff182SThomas Petazzoni static void mvneta_set_rx_mode(struct net_device *dev)
2143c5aff182SThomas Petazzoni {
2144c5aff182SThomas Petazzoni struct mvneta_port *pp =
netdev_priv(dev); 2145c5aff182SThomas Petazzoni struct netdev_hw_addr *ha; 2146c5aff182SThomas Petazzoni 2147c5aff182SThomas Petazzoni if (dev->flags & IFF_PROMISC) { 2148c5aff182SThomas Petazzoni /* Accept all: Multicast + Unicast */ 2149c5aff182SThomas Petazzoni mvneta_rx_unicast_promisc_set(pp, 1); 215090b74c01SGregory CLEMENT mvneta_set_ucast_table(pp, pp->rxq_def); 215190b74c01SGregory CLEMENT mvneta_set_special_mcast_table(pp, pp->rxq_def); 215290b74c01SGregory CLEMENT mvneta_set_other_mcast_table(pp, pp->rxq_def); 2153c5aff182SThomas Petazzoni } else { 2154c5aff182SThomas Petazzoni /* Accept single Unicast */ 2155c5aff182SThomas Petazzoni mvneta_rx_unicast_promisc_set(pp, 0); 2156c5aff182SThomas Petazzoni mvneta_set_ucast_table(pp, -1); 215790b74c01SGregory CLEMENT mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); 2158c5aff182SThomas Petazzoni 2159c5aff182SThomas Petazzoni if (dev->flags & IFF_ALLMULTI) { 2160c5aff182SThomas Petazzoni /* Accept all multicast */ 216190b74c01SGregory CLEMENT mvneta_set_special_mcast_table(pp, pp->rxq_def); 216290b74c01SGregory CLEMENT mvneta_set_other_mcast_table(pp, pp->rxq_def); 2163c5aff182SThomas Petazzoni } else { 2164c5aff182SThomas Petazzoni /* Accept only initialized multicast */ 2165c5aff182SThomas Petazzoni mvneta_set_special_mcast_table(pp, -1); 2166c5aff182SThomas Petazzoni mvneta_set_other_mcast_table(pp, -1); 2167c5aff182SThomas Petazzoni 2168c5aff182SThomas Petazzoni if (!netdev_mc_empty(dev)) { 2169c5aff182SThomas Petazzoni netdev_for_each_mc_addr(ha, dev) { 2170c5aff182SThomas Petazzoni mvneta_mcast_addr_set(pp, ha->addr, 217190b74c01SGregory CLEMENT pp->rxq_def); 2172c5aff182SThomas Petazzoni } 2173c5aff182SThomas Petazzoni } 2174c5aff182SThomas Petazzoni } 2175c5aff182SThomas Petazzoni } 2176c5aff182SThomas Petazzoni } 2177c5aff182SThomas Petazzoni 2178c5aff182SThomas Petazzoni /* Interrupt handling - the callback for request_irq() */ 2179c5aff182SThomas Petazzoni static irqreturn_t mvneta_isr(int irq, void *dev_id) 2180c5aff182SThomas Petazzoni { 218112bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; 2182c5aff182SThomas Petazzoni 218312bb03b4SMaxime Ripard disable_percpu_irq(port->pp->dev->irq); 218412bb03b4SMaxime Ripard napi_schedule(&port->napi); 2185c5aff182SThomas Petazzoni 2186c5aff182SThomas Petazzoni return IRQ_HANDLED; 2187c5aff182SThomas Petazzoni } 2188c5aff182SThomas Petazzoni 2189898b2970SStas Sergeev static int mvneta_fixed_link_update(struct mvneta_port *pp, 2190898b2970SStas Sergeev struct phy_device *phy) 2191898b2970SStas Sergeev { 2192898b2970SStas Sergeev struct fixed_phy_status status; 2193898b2970SStas Sergeev struct fixed_phy_status changed = {}; 2194898b2970SStas Sergeev u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 2195898b2970SStas Sergeev 2196898b2970SStas Sergeev status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 2197898b2970SStas Sergeev if (gmac_stat & MVNETA_GMAC_SPEED_1000) 2198898b2970SStas Sergeev status.speed = SPEED_1000; 2199898b2970SStas Sergeev else if (gmac_stat & MVNETA_GMAC_SPEED_100) 2200898b2970SStas Sergeev status.speed = SPEED_100; 2201898b2970SStas Sergeev else 2202898b2970SStas Sergeev status.speed = SPEED_10; 2203898b2970SStas Sergeev status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 2204898b2970SStas Sergeev changed.link = 1; 2205898b2970SStas Sergeev changed.speed = 1; 2206898b2970SStas Sergeev changed.duplex = 1; 2207898b2970SStas Sergeev fixed_phy_update_state(phy, &status, &changed); 2208898b2970SStas Sergeev return 0; 
2209898b2970SStas Sergeev } 2210898b2970SStas Sergeev 2211c5aff182SThomas Petazzoni /* NAPI handler 2212c5aff182SThomas Petazzoni * Bits 0 - 7 of the causeRxTx register indicate that packets were 2213c5aff182SThomas Petazzoni * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1). 2214c5aff182SThomas Petazzoni * Bits 8 - 15 of the causeRxTx register indicate that packets were 2215c5aff182SThomas Petazzoni * received on the corresponding RXQ (Bit 8 is for RX queue 0). 2216c5aff182SThomas Petazzoni * Each CPU has its own causeRxTx register 2217c5aff182SThomas Petazzoni */ 2218c5aff182SThomas Petazzoni static int mvneta_poll(struct napi_struct *napi, int budget) 2219c5aff182SThomas Petazzoni { 2220c5aff182SThomas Petazzoni int rx_done = 0; 2221c5aff182SThomas Petazzoni u32 cause_rx_tx; 22222dcf75e2SGregory CLEMENT int rx_queue; 2223c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(napi->dev); 222412bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 2225c5aff182SThomas Petazzoni 2226c5aff182SThomas Petazzoni if (!netif_running(pp->dev)) { 222712bb03b4SMaxime Ripard napi_complete(&port->napi); 2228c5aff182SThomas Petazzoni return rx_done; 2229c5aff182SThomas Petazzoni } 2230c5aff182SThomas Petazzoni 2231c5aff182SThomas Petazzoni /* Read cause register */ 2232898b2970SStas Sergeev cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 2233898b2970SStas Sergeev if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 2234898b2970SStas Sergeev u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 2235898b2970SStas Sergeev 2236898b2970SStas Sergeev mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2237898b2970SStas Sergeev if (pp->use_inband_status && (cause_misc & 2238898b2970SStas Sergeev (MVNETA_CAUSE_PHY_STATUS_CHANGE | 2239898b2970SStas Sergeev MVNETA_CAUSE_LINK_CHANGE | 2240898b2970SStas Sergeev MVNETA_CAUSE_PSC_SYNC_CHANGE))) { 2241898b2970SStas Sergeev mvneta_fixed_link_update(pp, pp->phy_dev); 2242898b2970SStas Sergeev } 2243898b2970SStas Sergeev } 224471f6d1b3Swilly tarreau 224571f6d1b3Swilly tarreau /* Release Tx descriptors */ 224671f6d1b3Swilly tarreau if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 22470713a86aSArnaud Ebalard mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 224871f6d1b3Swilly tarreau cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 224971f6d1b3Swilly tarreau } 2250c5aff182SThomas Petazzoni 22516a20c175SThomas Petazzoni /* For the case where the last mvneta_poll did not process all 2252c5aff182SThomas Petazzoni * RX packets 2253c5aff182SThomas Petazzoni */ 22542dcf75e2SGregory CLEMENT rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 22552dcf75e2SGregory CLEMENT 225612bb03b4SMaxime Ripard cause_rx_tx |= port->cause_rx_tx; 22572dcf75e2SGregory CLEMENT 22582dcf75e2SGregory CLEMENT if (rx_queue) { 22592dcf75e2SGregory CLEMENT rx_queue = rx_queue - 1; 22602dcf75e2SGregory CLEMENT rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); 22612dcf75e2SGregory CLEMENT } 22622dcf75e2SGregory CLEMENT 2263c5aff182SThomas Petazzoni budget -= rx_done; 2264c5aff182SThomas Petazzoni 2265c5aff182SThomas Petazzoni if (budget > 0) { 2266c5aff182SThomas Petazzoni cause_rx_tx = 0; 226712bb03b4SMaxime Ripard napi_complete(&port->napi); 226812bb03b4SMaxime Ripard enable_percpu_irq(pp->dev->irq, 0); 2269c5aff182SThomas Petazzoni } 2270c5aff182SThomas Petazzoni 227112bb03b4SMaxime Ripard port->cause_rx_tx = cause_rx_tx; 2272c5aff182SThomas Petazzoni return rx_done; 2273c5aff182SThomas Petazzoni } 2274c5aff182SThomas Petazzoni 2275c5aff182SThomas Petazzoni /*
Handle rxq fill: allocates rxq skbs; called when initializing a port */ 2276c5aff182SThomas Petazzoni static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2277c5aff182SThomas Petazzoni int num) 2278c5aff182SThomas Petazzoni { 2279c5aff182SThomas Petazzoni int i; 2280c5aff182SThomas Petazzoni 2281c5aff182SThomas Petazzoni for (i = 0; i < num; i++) { 2282a1a65ab1Swilly tarreau memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 2283a1a65ab1Swilly tarreau if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { 2284a1a65ab1Swilly tarreau netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", 2285c5aff182SThomas Petazzoni __func__, rxq->id, i, num); 2286c5aff182SThomas Petazzoni break; 2287c5aff182SThomas Petazzoni } 2288c5aff182SThomas Petazzoni } 2289c5aff182SThomas Petazzoni 2290c5aff182SThomas Petazzoni /* Add this number of RX descriptors as non-occupied (ready to 22916a20c175SThomas Petazzoni * get packets) 22926a20c175SThomas Petazzoni */ 2293c5aff182SThomas Petazzoni mvneta_rxq_non_occup_desc_add(pp, rxq, i); 2294c5aff182SThomas Petazzoni 2295c5aff182SThomas Petazzoni return i; 2296c5aff182SThomas Petazzoni } 2297c5aff182SThomas Petazzoni 2298c5aff182SThomas Petazzoni /* Free all packets pending transmit from all TXQs and reset TX port */ 2299c5aff182SThomas Petazzoni static void mvneta_tx_reset(struct mvneta_port *pp) 2300c5aff182SThomas Petazzoni { 2301c5aff182SThomas Petazzoni int queue; 2302c5aff182SThomas Petazzoni 23039672850bSEzequiel Garcia /* free the skbs in the tx ring */ 2304c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) 2305c5aff182SThomas Petazzoni mvneta_txq_done_force(pp, &pp->txqs[queue]); 2306c5aff182SThomas Petazzoni 2307c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 2308c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 2309c5aff182SThomas Petazzoni } 2310c5aff182SThomas Petazzoni 2311c5aff182SThomas Petazzoni static void mvneta_rx_reset(struct mvneta_port *pp) 2312c5aff182SThomas Petazzoni { 2313c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 2314c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 2315c5aff182SThomas Petazzoni } 2316c5aff182SThomas Petazzoni 2317c5aff182SThomas Petazzoni /* Rx/Tx queue initialization/cleanup methods */ 2318c5aff182SThomas Petazzoni 2319c5aff182SThomas Petazzoni /* Create a specified RX queue */ 2320c5aff182SThomas Petazzoni static int mvneta_rxq_init(struct mvneta_port *pp, 2321c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 2322c5aff182SThomas Petazzoni 2323c5aff182SThomas Petazzoni { 2324c5aff182SThomas Petazzoni rxq->size = pp->rx_ring_size; 2325c5aff182SThomas Petazzoni 2326c5aff182SThomas Petazzoni /* Allocate memory for RX descriptors */ 2327c5aff182SThomas Petazzoni rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2328c5aff182SThomas Petazzoni rxq->size * MVNETA_DESC_ALIGNED_SIZE, 2329c5aff182SThomas Petazzoni &rxq->descs_phys, GFP_KERNEL); 2330d0320f75SJoe Perches if (rxq->descs == NULL) 2331c5aff182SThomas Petazzoni return -ENOMEM; 2332c5aff182SThomas Petazzoni 2333c5aff182SThomas Petazzoni BUG_ON(rxq->descs != 2334c5aff182SThomas Petazzoni PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 2335c5aff182SThomas Petazzoni 2336c5aff182SThomas Petazzoni rxq->last_desc = rxq->size - 1; 2337c5aff182SThomas Petazzoni 2338c5aff182SThomas Petazzoni /* Set Rx descriptors queue starting address */ 2339c5aff182SThomas Petazzoni mvreg_write(pp,
MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 2340c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 2341c5aff182SThomas Petazzoni 2342c5aff182SThomas Petazzoni /* Set Offset */ 2343c5aff182SThomas Petazzoni mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); 2344c5aff182SThomas Petazzoni 2345c5aff182SThomas Petazzoni /* Set coalescing pkts and time */ 2346c5aff182SThomas Petazzoni mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 2347c5aff182SThomas Petazzoni mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 2348c5aff182SThomas Petazzoni 2349c5aff182SThomas Petazzoni /* Fill RXQ with buffers from RX pool */ 2350c5aff182SThomas Petazzoni mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); 2351c5aff182SThomas Petazzoni mvneta_rxq_bm_disable(pp, rxq); 2352c5aff182SThomas Petazzoni mvneta_rxq_fill(pp, rxq, rxq->size); 2353c5aff182SThomas Petazzoni 2354c5aff182SThomas Petazzoni return 0; 2355c5aff182SThomas Petazzoni } 2356c5aff182SThomas Petazzoni 2357c5aff182SThomas Petazzoni /* Cleanup Rx queue */ 2358c5aff182SThomas Petazzoni static void mvneta_rxq_deinit(struct mvneta_port *pp, 2359c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 2360c5aff182SThomas Petazzoni { 2361c5aff182SThomas Petazzoni mvneta_rxq_drop_pkts(pp, rxq); 2362c5aff182SThomas Petazzoni 2363c5aff182SThomas Petazzoni if (rxq->descs) 2364c5aff182SThomas Petazzoni dma_free_coherent(pp->dev->dev.parent, 2365c5aff182SThomas Petazzoni rxq->size * MVNETA_DESC_ALIGNED_SIZE, 2366c5aff182SThomas Petazzoni rxq->descs, 2367c5aff182SThomas Petazzoni rxq->descs_phys); 2368c5aff182SThomas Petazzoni 2369c5aff182SThomas Petazzoni rxq->descs = NULL; 2370c5aff182SThomas Petazzoni rxq->last_desc = 0; 2371c5aff182SThomas Petazzoni rxq->next_desc_to_proc = 0; 2372c5aff182SThomas Petazzoni rxq->descs_phys = 0; 2373c5aff182SThomas Petazzoni } 2374c5aff182SThomas Petazzoni 2375c5aff182SThomas Petazzoni /* Create and initialize a tx queue */ 2376c5aff182SThomas Petazzoni static int mvneta_txq_init(struct mvneta_port *pp, 2377c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 2378c5aff182SThomas Petazzoni { 237950bf8cb6SGregory CLEMENT int cpu; 238050bf8cb6SGregory CLEMENT 2381c5aff182SThomas Petazzoni txq->size = pp->tx_ring_size; 2382c5aff182SThomas Petazzoni 23838eef5f97SEzequiel Garcia /* A queue must always have room for at least one skb. 23848eef5f97SEzequiel Garcia * Therefore, stop the queue when the number of free entries 23858eef5f97SEzequiel Garcia * reaches the maximum number of descriptors per skb.
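 * The wake threshold set below is half the stop threshold, which gives the queue some hysteresis: once stopped, it is not woken again until a good chunk of descriptors has been reclaimed, instead of flapping between the stopped and awake states on every completion.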
23868eef5f97SEzequiel Garcia */ 23878eef5f97SEzequiel Garcia txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 23888eef5f97SEzequiel Garcia txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 23898eef5f97SEzequiel Garcia 23908eef5f97SEzequiel Garcia 2391c5aff182SThomas Petazzoni /* Allocate memory for TX descriptors */ 2392c5aff182SThomas Petazzoni txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2393c5aff182SThomas Petazzoni txq->size * MVNETA_DESC_ALIGNED_SIZE, 2394c5aff182SThomas Petazzoni &txq->descs_phys, GFP_KERNEL); 2395d0320f75SJoe Perches if (txq->descs == NULL) 2396c5aff182SThomas Petazzoni return -ENOMEM; 2397c5aff182SThomas Petazzoni 2398c5aff182SThomas Petazzoni /* Make sure descriptor address is cache line size aligned */ 2399c5aff182SThomas Petazzoni BUG_ON(txq->descs != 2400c5aff182SThomas Petazzoni PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 2401c5aff182SThomas Petazzoni 2402c5aff182SThomas Petazzoni txq->last_desc = txq->size - 1; 2403c5aff182SThomas Petazzoni 2404c5aff182SThomas Petazzoni /* Set maximum bandwidth for enabled TXQs */ 2405c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 2406c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 2407c5aff182SThomas Petazzoni 2408c5aff182SThomas Petazzoni /* Set Tx descriptors queue starting address */ 2409c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 2410c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 2411c5aff182SThomas Petazzoni 2412c5aff182SThomas Petazzoni txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); 2413c5aff182SThomas Petazzoni if (txq->tx_skb == NULL) { 2414c5aff182SThomas Petazzoni dma_free_coherent(pp->dev->dev.parent, 2415c5aff182SThomas Petazzoni txq->size * MVNETA_DESC_ALIGNED_SIZE, 2416c5aff182SThomas Petazzoni txq->descs, txq->descs_phys); 2417c5aff182SThomas Petazzoni return -ENOMEM; 2418c5aff182SThomas Petazzoni } 24192adb719dSEzequiel Garcia 24202adb719dSEzequiel Garcia /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 24212adb719dSEzequiel Garcia txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, 24222adb719dSEzequiel Garcia txq->size * TSO_HEADER_SIZE, 24232adb719dSEzequiel Garcia &txq->tso_hdrs_phys, GFP_KERNEL); 24242adb719dSEzequiel Garcia if (txq->tso_hdrs == NULL) { 24252adb719dSEzequiel Garcia kfree(txq->tx_skb); 24262adb719dSEzequiel Garcia dma_free_coherent(pp->dev->dev.parent, 24272adb719dSEzequiel Garcia txq->size * MVNETA_DESC_ALIGNED_SIZE, 24282adb719dSEzequiel Garcia txq->descs, txq->descs_phys); 24292adb719dSEzequiel Garcia return -ENOMEM; 24302adb719dSEzequiel Garcia } 2431c5aff182SThomas Petazzoni mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2432c5aff182SThomas Petazzoni 243350bf8cb6SGregory CLEMENT /* Setup XPS mapping */ 243450bf8cb6SGregory CLEMENT if (txq_number > 1) 243550bf8cb6SGregory CLEMENT cpu = txq->id % num_present_cpus(); 243650bf8cb6SGregory CLEMENT else 243750bf8cb6SGregory CLEMENT cpu = pp->rxq_def % num_present_cpus(); 243850bf8cb6SGregory CLEMENT cpumask_set_cpu(cpu, &txq->affinity_mask); 243950bf8cb6SGregory CLEMENT netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 244050bf8cb6SGregory CLEMENT 2441c5aff182SThomas Petazzoni return 0; 2442c5aff182SThomas Petazzoni } 2443c5aff182SThomas Petazzoni 2444c5aff182SThomas Petazzoni /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 2445c5aff182SThomas Petazzoni 
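/* (mvneta_cleanup_txqs() below also uses this helper for the normal teardown path, hence the NULL checks before each dma_free_coherent().) */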
static void mvneta_txq_deinit(struct mvneta_port *pp, 2446c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 2447c5aff182SThomas Petazzoni { 2448c5aff182SThomas Petazzoni kfree(txq->tx_skb); 2449c5aff182SThomas Petazzoni 24502adb719dSEzequiel Garcia if (txq->tso_hdrs) 24512adb719dSEzequiel Garcia dma_free_coherent(pp->dev->dev.parent, 24522adb719dSEzequiel Garcia txq->size * TSO_HEADER_SIZE, 24532adb719dSEzequiel Garcia txq->tso_hdrs, txq->tso_hdrs_phys); 2454c5aff182SThomas Petazzoni if (txq->descs) 2455c5aff182SThomas Petazzoni dma_free_coherent(pp->dev->dev.parent, 2456c5aff182SThomas Petazzoni txq->size * MVNETA_DESC_ALIGNED_SIZE, 2457c5aff182SThomas Petazzoni txq->descs, txq->descs_phys); 2458c5aff182SThomas Petazzoni 2459c5aff182SThomas Petazzoni txq->descs = NULL; 2460c5aff182SThomas Petazzoni txq->last_desc = 0; 2461c5aff182SThomas Petazzoni txq->next_desc_to_proc = 0; 2462c5aff182SThomas Petazzoni txq->descs_phys = 0; 2463c5aff182SThomas Petazzoni 2464c5aff182SThomas Petazzoni /* Set minimum bandwidth for disabled TXQs */ 2465c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 2466c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 2467c5aff182SThomas Petazzoni 2468c5aff182SThomas Petazzoni /* Set Tx descriptors queue starting address and size */ 2469c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 2470c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 2471c5aff182SThomas Petazzoni } 2472c5aff182SThomas Petazzoni 2473c5aff182SThomas Petazzoni /* Cleanup all Tx queues */ 2474c5aff182SThomas Petazzoni static void mvneta_cleanup_txqs(struct mvneta_port *pp) 2475c5aff182SThomas Petazzoni { 2476c5aff182SThomas Petazzoni int queue; 2477c5aff182SThomas Petazzoni 2478c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) 2479c5aff182SThomas Petazzoni mvneta_txq_deinit(pp, &pp->txqs[queue]); 2480c5aff182SThomas Petazzoni } 2481c5aff182SThomas Petazzoni 2482c5aff182SThomas Petazzoni /* Cleanup all Rx queues */ 2483c5aff182SThomas Petazzoni static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 2484c5aff182SThomas Petazzoni { 24852dcf75e2SGregory CLEMENT int queue; 24862dcf75e2SGregory CLEMENT 24872dcf75e2SGregory CLEMENT for (queue = 0; queue < rxq_number; queue++) 24882dcf75e2SGregory CLEMENT mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 2489c5aff182SThomas Petazzoni } 2490c5aff182SThomas Petazzoni 2491c5aff182SThomas Petazzoni 2492c5aff182SThomas Petazzoni /* Init all Rx queues */ 2493c5aff182SThomas Petazzoni static int mvneta_setup_rxqs(struct mvneta_port *pp) 2494c5aff182SThomas Petazzoni { 24952dcf75e2SGregory CLEMENT int queue; 24962dcf75e2SGregory CLEMENT 24972dcf75e2SGregory CLEMENT for (queue = 0; queue < rxq_number; queue++) { 24982dcf75e2SGregory CLEMENT int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 24992dcf75e2SGregory CLEMENT 2500c5aff182SThomas Petazzoni if (err) { 2501c5aff182SThomas Petazzoni netdev_err(pp->dev, "%s: can't create rxq=%d\n", 25022dcf75e2SGregory CLEMENT __func__, queue); 2503c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 2504c5aff182SThomas Petazzoni return err; 2505c5aff182SThomas Petazzoni } 25062dcf75e2SGregory CLEMENT } 2507c5aff182SThomas Petazzoni 2508c5aff182SThomas Petazzoni return 0; 2509c5aff182SThomas Petazzoni } 2510c5aff182SThomas Petazzoni 2511c5aff182SThomas Petazzoni /* Init all tx queues */ 2512c5aff182SThomas Petazzoni static int mvneta_setup_txqs(struct mvneta_port *pp) 2513c5aff182SThomas Petazzoni {
2514c5aff182SThomas Petazzoni int queue; 2515c5aff182SThomas Petazzoni 2516c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 2517c5aff182SThomas Petazzoni int err = mvneta_txq_init(pp, &pp->txqs[queue]); 2518c5aff182SThomas Petazzoni if (err) { 2519c5aff182SThomas Petazzoni netdev_err(pp->dev, "%s: can't create txq=%d\n", 2520c5aff182SThomas Petazzoni __func__, queue); 2521c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 2522c5aff182SThomas Petazzoni return err; 2523c5aff182SThomas Petazzoni } 2524c5aff182SThomas Petazzoni } 2525c5aff182SThomas Petazzoni 2526c5aff182SThomas Petazzoni return 0; 2527c5aff182SThomas Petazzoni } 2528c5aff182SThomas Petazzoni 25292dcf75e2SGregory CLEMENT static void mvneta_percpu_unmask_interrupt(void *arg) 25302dcf75e2SGregory CLEMENT { 25312dcf75e2SGregory CLEMENT struct mvneta_port *pp = arg; 25322dcf75e2SGregory CLEMENT 25332dcf75e2SGregory CLEMENT /* All the queues are unmasked, but actually only the ones 25342dcf75e2SGregory CLEMENT * mapped to this CPU will be unmasked 25352dcf75e2SGregory CLEMENT */ 25362dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_NEW_MASK, 25372dcf75e2SGregory CLEMENT MVNETA_RX_INTR_MASK_ALL | 25382dcf75e2SGregory CLEMENT MVNETA_TX_INTR_MASK_ALL | 25392dcf75e2SGregory CLEMENT MVNETA_MISCINTR_INTR_MASK); 25402dcf75e2SGregory CLEMENT } 25412dcf75e2SGregory CLEMENT 25429a401deaSGregory CLEMENT static void mvneta_percpu_mask_interrupt(void *arg) 25439a401deaSGregory CLEMENT { 25449a401deaSGregory CLEMENT struct mvneta_port *pp = arg; 25459a401deaSGregory CLEMENT 25469a401deaSGregory CLEMENT /* All the queues are masked, but actually only the ones 25479a401deaSGregory CLEMENT * mapped to this CPU will be masked 25489a401deaSGregory CLEMENT */ 25499a401deaSGregory CLEMENT mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 25509a401deaSGregory CLEMENT mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 25519a401deaSGregory CLEMENT mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 25529a401deaSGregory CLEMENT } 25539a401deaSGregory CLEMENT 2554c5aff182SThomas Petazzoni static void mvneta_start_dev(struct mvneta_port *pp) 2555c5aff182SThomas Petazzoni { 255612bb03b4SMaxime Ripard unsigned int cpu; 255612bb03b4SMaxime Ripard 2558c5aff182SThomas Petazzoni mvneta_max_rx_size_set(pp, pp->pkt_size); 2559c5aff182SThomas Petazzoni mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2560c5aff182SThomas Petazzoni 2561c5aff182SThomas Petazzoni /* start the Rx/Tx activity */ 2562c5aff182SThomas Petazzoni mvneta_port_enable(pp); 2563c5aff182SThomas Petazzoni 2564c5aff182SThomas Petazzoni /* Enable polling on the port */ 256512bb03b4SMaxime Ripard for_each_present_cpu(cpu) { 256612bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 256712bb03b4SMaxime Ripard 256812bb03b4SMaxime Ripard napi_enable(&port->napi); 256912bb03b4SMaxime Ripard } 2570c5aff182SThomas Petazzoni 25712dcf75e2SGregory CLEMENT /* Unmask interrupts.
It has to be done from each CPU */ 25722dcf75e2SGregory CLEMENT for_each_online_cpu(cpu) 25732dcf75e2SGregory CLEMENT smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 25742dcf75e2SGregory CLEMENT pp, true); 2575898b2970SStas Sergeev mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2576898b2970SStas Sergeev MVNETA_CAUSE_PHY_STATUS_CHANGE | 2577898b2970SStas Sergeev MVNETA_CAUSE_LINK_CHANGE | 2578898b2970SStas Sergeev MVNETA_CAUSE_PSC_SYNC_CHANGE); 2579c5aff182SThomas Petazzoni 2580c5aff182SThomas Petazzoni phy_start(pp->phy_dev); 2581c5aff182SThomas Petazzoni netif_tx_start_all_queues(pp->dev); 2582c5aff182SThomas Petazzoni } 2583c5aff182SThomas Petazzoni 2584c5aff182SThomas Petazzoni static void mvneta_stop_dev(struct mvneta_port *pp) 2585c5aff182SThomas Petazzoni { 258612bb03b4SMaxime Ripard unsigned int cpu; 258712bb03b4SMaxime Ripard 2588c5aff182SThomas Petazzoni phy_stop(pp->phy_dev); 2589c5aff182SThomas Petazzoni 259012bb03b4SMaxime Ripard for_each_present_cpu(cpu) { 259112bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 259212bb03b4SMaxime Ripard 259312bb03b4SMaxime Ripard napi_disable(&port->napi); 259412bb03b4SMaxime Ripard } 2595c5aff182SThomas Petazzoni 2596c5aff182SThomas Petazzoni netif_carrier_off(pp->dev); 2597c5aff182SThomas Petazzoni 2598c5aff182SThomas Petazzoni mvneta_port_down(pp); 2599c5aff182SThomas Petazzoni netif_tx_stop_all_queues(pp->dev); 2600c5aff182SThomas Petazzoni 2601c5aff182SThomas Petazzoni /* Stop the port activity */ 2602c5aff182SThomas Petazzoni mvneta_port_disable(pp); 2603c5aff182SThomas Petazzoni 2604c5aff182SThomas Petazzoni /* Clear all ethernet port interrupts */ 2605c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2606c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 2607c5aff182SThomas Petazzoni 2608c5aff182SThomas Petazzoni /* Mask all ethernet port interrupts */ 2609c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2610c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2611c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 2612c5aff182SThomas Petazzoni 2613c5aff182SThomas Petazzoni mvneta_tx_reset(pp); 2614c5aff182SThomas Petazzoni mvneta_rx_reset(pp); 2615c5aff182SThomas Petazzoni } 2616c5aff182SThomas Petazzoni 2617c5aff182SThomas Petazzoni /* Return positive if MTU is valid */ 2618c5aff182SThomas Petazzoni static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) 2619c5aff182SThomas Petazzoni { 2620c5aff182SThomas Petazzoni if (mtu < 68) { 2621c5aff182SThomas Petazzoni netdev_err(dev, "cannot change mtu to less than 68\n"); 2622c5aff182SThomas Petazzoni return -EINVAL; 2623c5aff182SThomas Petazzoni } 2624c5aff182SThomas Petazzoni 2625c5aff182SThomas Petazzoni /* 9676 == 9700 - 20 and rounding to 8 */ 2626c5aff182SThomas Petazzoni if (mtu > 9676) { 2627c5aff182SThomas Petazzoni netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu); 2628c5aff182SThomas Petazzoni mtu = 9676; 2629c5aff182SThomas Petazzoni } 2630c5aff182SThomas Petazzoni 2631c5aff182SThomas Petazzoni if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 2632c5aff182SThomas Petazzoni netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 2633c5aff182SThomas Petazzoni mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 2634c5aff182SThomas Petazzoni mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 2635c5aff182SThomas Petazzoni } 2636c5aff182SThomas Petazzoni 2637c5aff182SThomas Petazzoni return mtu; 2638c5aff182SThomas Petazzoni } 2639c5aff182SThomas Petazzoni 
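/* Worked examples for the checks above: a request for an MTU of 40 fails with -EINVAL (below the 68 byte minimum), a request for 10000 is clamped to 9676, and an MTU whose MVNETA_RX_PKT_SIZE() is not a multiple of 8 is replaced by the aligned value reported in the log message. */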
2640c5aff182SThomas Petazzoni /* Change the device mtu */ 2641c5aff182SThomas Petazzoni static int mvneta_change_mtu(struct net_device *dev, int mtu) 2642c5aff182SThomas Petazzoni { 2643c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 2644c5aff182SThomas Petazzoni int ret; 2645c5aff182SThomas Petazzoni 2646c5aff182SThomas Petazzoni mtu = mvneta_check_mtu_valid(dev, mtu); 2647c5aff182SThomas Petazzoni if (mtu < 0) 2648c5aff182SThomas Petazzoni return -EINVAL; 2649c5aff182SThomas Petazzoni 2650c5aff182SThomas Petazzoni dev->mtu = mtu; 2651c5aff182SThomas Petazzoni 2652b65657fcSSimon Guinot if (!netif_running(dev)) { 2653b65657fcSSimon Guinot netdev_update_features(dev); 2654c5aff182SThomas Petazzoni return 0; 2655b65657fcSSimon Guinot } 2656c5aff182SThomas Petazzoni 26576a20c175SThomas Petazzoni /* The interface is running, so we have to force a 2658a92dbd96SEzequiel Garcia * reallocation of the queues 2659c5aff182SThomas Petazzoni */ 2660c5aff182SThomas Petazzoni mvneta_stop_dev(pp); 2661c5aff182SThomas Petazzoni 2662c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 2663c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 2664c5aff182SThomas Petazzoni 2665a92dbd96SEzequiel Garcia pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 26668ec2cd48Swilly tarreau pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 26678ec2cd48Swilly tarreau SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2668c5aff182SThomas Petazzoni 2669c5aff182SThomas Petazzoni ret = mvneta_setup_rxqs(pp); 2670c5aff182SThomas Petazzoni if (ret) { 2671a92dbd96SEzequiel Garcia netdev_err(dev, "unable to setup rxqs after MTU change\n"); 2672c5aff182SThomas Petazzoni return ret; 2673c5aff182SThomas Petazzoni } 2674c5aff182SThomas Petazzoni 2675a92dbd96SEzequiel Garcia ret = mvneta_setup_txqs(pp); 2676a92dbd96SEzequiel Garcia if (ret) { 2677a92dbd96SEzequiel Garcia netdev_err(dev, "unable to setup txqs after MTU change\n"); 2678a92dbd96SEzequiel Garcia return ret; 2679a92dbd96SEzequiel Garcia } 2680c5aff182SThomas Petazzoni 2681c5aff182SThomas Petazzoni mvneta_start_dev(pp); 2682c5aff182SThomas Petazzoni mvneta_port_up(pp); 2683c5aff182SThomas Petazzoni 2684b65657fcSSimon Guinot netdev_update_features(dev); 2685b65657fcSSimon Guinot 2686c5aff182SThomas Petazzoni return 0; 2687c5aff182SThomas Petazzoni } 2688c5aff182SThomas Petazzoni 2689b65657fcSSimon Guinot static netdev_features_t mvneta_fix_features(struct net_device *dev, 2690b65657fcSSimon Guinot netdev_features_t features) 2691b65657fcSSimon Guinot { 2692b65657fcSSimon Guinot struct mvneta_port *pp = netdev_priv(dev); 2693b65657fcSSimon Guinot 2694b65657fcSSimon Guinot if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 2695b65657fcSSimon Guinot features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 2696b65657fcSSimon Guinot netdev_info(dev, 2697b65657fcSSimon Guinot "Disable IP checksum for MTU greater than %dB\n", 2698b65657fcSSimon Guinot pp->tx_csum_limit); 2699b65657fcSSimon Guinot } 2700b65657fcSSimon Guinot 2701b65657fcSSimon Guinot return features; 2702b65657fcSSimon Guinot } 2703b65657fcSSimon Guinot 27048cc3e439SThomas Petazzoni /* Get mac address */ 27058cc3e439SThomas Petazzoni static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 27068cc3e439SThomas Petazzoni { 27078cc3e439SThomas Petazzoni u32 mac_addr_l, mac_addr_h; 27088cc3e439SThomas Petazzoni 27098cc3e439SThomas Petazzoni mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 27108cc3e439SThomas Petazzoni mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 
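/* MVNETA_MAC_ADDR_HIGH holds bytes 0-3 of the address, most significant byte in the top bits, and the low 16 bits of MVNETA_MAC_ADDR_LOW hold bytes 4-5; they are unpacked below. */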
27118cc3e439SThomas Petazzoni addr[0] = (mac_addr_h >> 24) & 0xFF; 27128cc3e439SThomas Petazzoni addr[1] = (mac_addr_h >> 16) & 0xFF; 27138cc3e439SThomas Petazzoni addr[2] = (mac_addr_h >> 8) & 0xFF; 27148cc3e439SThomas Petazzoni addr[3] = mac_addr_h & 0xFF; 27158cc3e439SThomas Petazzoni addr[4] = (mac_addr_l >> 8) & 0xFF; 27168cc3e439SThomas Petazzoni addr[5] = mac_addr_l & 0xFF; 27178cc3e439SThomas Petazzoni } 27188cc3e439SThomas Petazzoni 2719c5aff182SThomas Petazzoni /* Handle setting mac address */ 2720c5aff182SThomas Petazzoni static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2721c5aff182SThomas Petazzoni { 2722c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 2723e68de360SEzequiel Garcia struct sockaddr *sockaddr = addr; 2724e68de360SEzequiel Garcia int ret; 2725c5aff182SThomas Petazzoni 2726e68de360SEzequiel Garcia ret = eth_prepare_mac_addr_change(dev, addr); 2727e68de360SEzequiel Garcia if (ret < 0) 2728e68de360SEzequiel Garcia return ret; 2729c5aff182SThomas Petazzoni /* Remove previous address table entry */ 2730c5aff182SThomas Petazzoni mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2731c5aff182SThomas Petazzoni 2732c5aff182SThomas Petazzoni /* Set new addr in hw */ 273390b74c01SGregory CLEMENT mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 2734c5aff182SThomas Petazzoni 2735e68de360SEzequiel Garcia eth_commit_mac_addr_change(dev, addr); 2736c5aff182SThomas Petazzoni return 0; 2737c5aff182SThomas Petazzoni } 2738c5aff182SThomas Petazzoni 2739c5aff182SThomas Petazzoni static void mvneta_adjust_link(struct net_device *ndev) 2740c5aff182SThomas Petazzoni { 2741c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(ndev); 2742c5aff182SThomas Petazzoni struct phy_device *phydev = pp->phy_dev; 2743c5aff182SThomas Petazzoni int status_change = 0; 2744c5aff182SThomas Petazzoni 2745c5aff182SThomas Petazzoni if (phydev->link) { 2746c5aff182SThomas Petazzoni if ((pp->speed != phydev->speed) || 2747c5aff182SThomas Petazzoni (pp->duplex != phydev->duplex)) { 2748c5aff182SThomas Petazzoni u32 val; 2749c5aff182SThomas Petazzoni 2750c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2751c5aff182SThomas Petazzoni val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2752c5aff182SThomas Petazzoni MVNETA_GMAC_CONFIG_GMII_SPEED | 2753898b2970SStas Sergeev MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2754c5aff182SThomas Petazzoni 2755c5aff182SThomas Petazzoni if (phydev->duplex) 2756c5aff182SThomas Petazzoni val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2757c5aff182SThomas Petazzoni 2758c5aff182SThomas Petazzoni if (phydev->speed == SPEED_1000) 2759c5aff182SThomas Petazzoni val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 27604d12bc63SThomas Petazzoni else if (phydev->speed == SPEED_100) 2761c5aff182SThomas Petazzoni val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2762c5aff182SThomas Petazzoni 2763c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2764c5aff182SThomas Petazzoni 2765c5aff182SThomas Petazzoni pp->duplex = phydev->duplex; 2766c5aff182SThomas Petazzoni pp->speed = phydev->speed; 2767c5aff182SThomas Petazzoni } 2768c5aff182SThomas Petazzoni } 2769c5aff182SThomas Petazzoni 2770c5aff182SThomas Petazzoni if (phydev->link != pp->link) { 2771c5aff182SThomas Petazzoni if (!phydev->link) { 2772c5aff182SThomas Petazzoni pp->duplex = -1; 2773c5aff182SThomas Petazzoni pp->speed = 0; 2774c5aff182SThomas Petazzoni } 2775c5aff182SThomas Petazzoni 2776c5aff182SThomas Petazzoni pp->link = phydev->link; 2777c5aff182SThomas Petazzoni status_change = 1; 
2778c5aff182SThomas Petazzoni } 2779c5aff182SThomas Petazzoni 2780c5aff182SThomas Petazzoni if (status_change) { 2781c5aff182SThomas Petazzoni if (phydev->link) { 2782898b2970SStas Sergeev if (!pp->use_inband_status) { 2783898b2970SStas Sergeev u32 val = mvreg_read(pp, 2784898b2970SStas Sergeev MVNETA_GMAC_AUTONEG_CONFIG); 2785898b2970SStas Sergeev val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; 2786898b2970SStas Sergeev val |= MVNETA_GMAC_FORCE_LINK_PASS; 2787898b2970SStas Sergeev mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 2788898b2970SStas Sergeev val); 2789898b2970SStas Sergeev } 2790c5aff182SThomas Petazzoni mvneta_port_up(pp); 2791c5aff182SThomas Petazzoni } else { 2792898b2970SStas Sergeev if (!pp->use_inband_status) { 2793898b2970SStas Sergeev u32 val = mvreg_read(pp, 2794898b2970SStas Sergeev MVNETA_GMAC_AUTONEG_CONFIG); 2795898b2970SStas Sergeev val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 2796898b2970SStas Sergeev val |= MVNETA_GMAC_FORCE_LINK_DOWN; 2797898b2970SStas Sergeev mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 2798898b2970SStas Sergeev val); 2799898b2970SStas Sergeev } 2800c5aff182SThomas Petazzoni mvneta_port_down(pp); 2801c5aff182SThomas Petazzoni } 28020089b745SEzequiel Garcia phy_print_status(phydev); 2803c5aff182SThomas Petazzoni } 2804c5aff182SThomas Petazzoni } 2805c5aff182SThomas Petazzoni 2806c5aff182SThomas Petazzoni static int mvneta_mdio_probe(struct mvneta_port *pp) 2807c5aff182SThomas Petazzoni { 2808c5aff182SThomas Petazzoni struct phy_device *phy_dev; 2809c5aff182SThomas Petazzoni 2810c5aff182SThomas Petazzoni phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, 2811c5aff182SThomas Petazzoni pp->phy_interface); 2812c5aff182SThomas Petazzoni if (!phy_dev) { 2813c5aff182SThomas Petazzoni netdev_err(pp->dev, "could not find the PHY\n"); 2814c5aff182SThomas Petazzoni return -ENODEV; 2815c5aff182SThomas Petazzoni } 2816c5aff182SThomas Petazzoni 2817c5aff182SThomas Petazzoni phy_dev->supported &= PHY_GBIT_FEATURES; 2818c5aff182SThomas Petazzoni phy_dev->advertising = phy_dev->supported; 2819c5aff182SThomas Petazzoni 2820c5aff182SThomas Petazzoni pp->phy_dev = phy_dev; 2821c5aff182SThomas Petazzoni pp->link = 0; 2822c5aff182SThomas Petazzoni pp->duplex = 0; 2823c5aff182SThomas Petazzoni pp->speed = 0; 2824c5aff182SThomas Petazzoni 2825c5aff182SThomas Petazzoni return 0; 2826c5aff182SThomas Petazzoni } 2827c5aff182SThomas Petazzoni 2828c5aff182SThomas Petazzoni static void mvneta_mdio_remove(struct mvneta_port *pp) 2829c5aff182SThomas Petazzoni { 2830c5aff182SThomas Petazzoni phy_disconnect(pp->phy_dev); 2831c5aff182SThomas Petazzoni pp->phy_dev = NULL; 2832c5aff182SThomas Petazzoni } 2833c5aff182SThomas Petazzoni 2834f8642885SMaxime Ripard static void mvneta_percpu_enable(void *arg) 2835f8642885SMaxime Ripard { 2836f8642885SMaxime Ripard struct mvneta_port *pp = arg; 2837f8642885SMaxime Ripard 2838f8642885SMaxime Ripard enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 2839f8642885SMaxime Ripard } 2840f8642885SMaxime Ripard 2841f8642885SMaxime Ripard static void mvneta_percpu_disable(void *arg) 2842f8642885SMaxime Ripard { 2843f8642885SMaxime Ripard struct mvneta_port *pp = arg; 2844f8642885SMaxime Ripard 2845f8642885SMaxime Ripard disable_percpu_irq(pp->dev->irq); 2846f8642885SMaxime Ripard } 2847f8642885SMaxime Ripard 2848f8642885SMaxime Ripard static void mvneta_percpu_elect(struct mvneta_port *pp) 2849f8642885SMaxime Ripard { 28502dcf75e2SGregory CLEMENT int online_cpu_idx, max_cpu, cpu, i = 0; 2851f8642885SMaxime Ripard 285290b74c01SGregory CLEMENT 
online_cpu_idx = pp->rxq_def % num_online_cpus(); 28532dcf75e2SGregory CLEMENT max_cpu = num_present_cpus(); 2854f8642885SMaxime Ripard 2855f8642885SMaxime Ripard for_each_online_cpu(cpu) { 28562dcf75e2SGregory CLEMENT int rxq_map = 0, txq_map = 0; 28572dcf75e2SGregory CLEMENT int rxq; 28582dcf75e2SGregory CLEMENT 28592dcf75e2SGregory CLEMENT for (rxq = 0; rxq < rxq_number; rxq++) 28602dcf75e2SGregory CLEMENT if ((rxq % max_cpu) == cpu) 28612dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 28622dcf75e2SGregory CLEMENT 286350bf8cb6SGregory CLEMENT if (i == online_cpu_idx) 286450bf8cb6SGregory CLEMENT /* Map the default receive queue to the 286550bf8cb6SGregory CLEMENT * elected CPU 2866f8642885SMaxime Ripard */ 28672dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 286850bf8cb6SGregory CLEMENT 286950bf8cb6SGregory CLEMENT /* We update the TX queue map only if we have one 287050bf8cb6SGregory CLEMENT * queue. In this case we associate the TX queue to 287150bf8cb6SGregory CLEMENT * the CPU bound to the default RX queue 287250bf8cb6SGregory CLEMENT */ 287350bf8cb6SGregory CLEMENT if (txq_number == 1) 287450bf8cb6SGregory CLEMENT txq_map = (i == online_cpu_idx) ? 287550bf8cb6SGregory CLEMENT MVNETA_CPU_TXQ_ACCESS(1) : 0; 287650bf8cb6SGregory CLEMENT else 287750bf8cb6SGregory CLEMENT txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 287850bf8cb6SGregory CLEMENT MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 287950bf8cb6SGregory CLEMENT 28802dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 28812dcf75e2SGregory CLEMENT 28822dcf75e2SGregory CLEMENT /* Update the interrupt mask on each CPU according to the 28832dcf75e2SGregory CLEMENT * new mapping 28842dcf75e2SGregory CLEMENT */ 28852dcf75e2SGregory CLEMENT smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2886f8642885SMaxime Ripard pp, true); 2887f8642885SMaxime Ripard i++; 28882dcf75e2SGregory CLEMENT 2889f8642885SMaxime Ripard } 2890f8642885SMaxime Ripard } 2891f8642885SMaxime Ripard 2892f8642885SMaxime Ripard static int mvneta_percpu_notifier(struct notifier_block *nfb, 2893f8642885SMaxime Ripard unsigned long action, void *hcpu) 2894f8642885SMaxime Ripard { 2895f8642885SMaxime Ripard struct mvneta_port *pp = container_of(nfb, struct mvneta_port, 2896f8642885SMaxime Ripard cpu_notifier); 2897f8642885SMaxime Ripard int cpu = (unsigned long)hcpu, other_cpu; 2898f8642885SMaxime Ripard struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2899f8642885SMaxime Ripard 2900f8642885SMaxime Ripard switch (action) { 2901f8642885SMaxime Ripard case CPU_ONLINE: 2902f8642885SMaxime Ripard case CPU_ONLINE_FROZEN: 2903f8642885SMaxime Ripard netif_tx_stop_all_queues(pp->dev); 2904f8642885SMaxime Ripard 2905f8642885SMaxime Ripard /* We have to synchronise on the napi of each CPU 2906f8642885SMaxime Ripard * except the one just being woken up 2907f8642885SMaxime Ripard */ 2908f8642885SMaxime Ripard for_each_online_cpu(other_cpu) { 2909f8642885SMaxime Ripard if (other_cpu != cpu) { 2910f8642885SMaxime Ripard struct mvneta_pcpu_port *other_port = 2911f8642885SMaxime Ripard per_cpu_ptr(pp->ports, other_cpu); 2912f8642885SMaxime Ripard 2913f8642885SMaxime Ripard napi_synchronize(&other_port->napi); 2914f8642885SMaxime Ripard } 2915f8642885SMaxime Ripard } 2916f8642885SMaxime Ripard 2917f8642885SMaxime Ripard /* Mask all ethernet port interrupts */ 2918f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2919f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2920f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 2921f8642885SMaxime Ripard napi_enable(&port->napi); 2922f8642885SMaxime Ripard 29232dcf75e2SGregory CLEMENT 29242dcf75e2SGregory CLEMENT /* Enable per-CPU interrupts on the CPU that is 29252dcf75e2SGregory CLEMENT * brought up. 29262dcf75e2SGregory CLEMENT */ 29272dcf75e2SGregory CLEMENT smp_call_function_single(cpu, mvneta_percpu_enable, 29282dcf75e2SGregory CLEMENT pp, true); 29292dcf75e2SGregory CLEMENT 2930f8642885SMaxime Ripard /* Elect the CPU handling the default RX queue and 2931f8642885SMaxime Ripard * update the CPU to queue mapping. 2932f8642885SMaxime Ripard */ 2933f8642885SMaxime Ripard mvneta_percpu_elect(pp); 2934f8642885SMaxime Ripard 29352dcf75e2SGregory CLEMENT /* Unmask all ethernet port interrupts; this 29362dcf75e2SGregory CLEMENT * notifier is called for each CPU, and by this 29372dcf75e2SGregory CLEMENT * point the CPU to queue mapping has been applied 29382dcf75e2SGregory CLEMENT */ 2939f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2940f8642885SMaxime Ripard MVNETA_RX_INTR_MASK(rxq_number) | 2941f8642885SMaxime Ripard MVNETA_TX_INTR_MASK(txq_number) | 2942f8642885SMaxime Ripard MVNETA_MISCINTR_INTR_MASK); 2943f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2944f8642885SMaxime Ripard MVNETA_CAUSE_PHY_STATUS_CHANGE | 2945f8642885SMaxime Ripard MVNETA_CAUSE_LINK_CHANGE | 2946f8642885SMaxime Ripard MVNETA_CAUSE_PSC_SYNC_CHANGE); 2947f8642885SMaxime Ripard netif_tx_start_all_queues(pp->dev); 2948f8642885SMaxime Ripard break; 2949f8642885SMaxime Ripard case CPU_DOWN_PREPARE: 2950f8642885SMaxime Ripard case CPU_DOWN_PREPARE_FROZEN: 2951f8642885SMaxime Ripard netif_tx_stop_all_queues(pp->dev); 2952f8642885SMaxime Ripard /* Mask all ethernet port interrupts */ 2953f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2954f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2955f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 2956f8642885SMaxime Ripard 2957f8642885SMaxime Ripard napi_synchronize(&port->napi); 2958f8642885SMaxime Ripard napi_disable(&port->napi); 2959f8642885SMaxime Ripard /* Disable per-CPU interrupts on the CPU that is 2960f8642885SMaxime Ripard * brought down.
2961f8642885SMaxime Ripard */ 2962f8642885SMaxime Ripard smp_call_function_single(cpu, mvneta_percpu_disable, 2963f8642885SMaxime Ripard pp, true); 2964f8642885SMaxime Ripard 2965f8642885SMaxime Ripard break; 2966f8642885SMaxime Ripard case CPU_DEAD: 2967f8642885SMaxime Ripard case CPU_DEAD_FROZEN: 2968f8642885SMaxime Ripard /* Check if a new CPU must be elected now that this one is down */ 2969f8642885SMaxime Ripard mvneta_percpu_elect(pp); 2970f8642885SMaxime Ripard /* Unmask all ethernet port interrupts */ 2971f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2972f8642885SMaxime Ripard MVNETA_RX_INTR_MASK(rxq_number) | 2973f8642885SMaxime Ripard MVNETA_TX_INTR_MASK(txq_number) | 2974f8642885SMaxime Ripard MVNETA_MISCINTR_INTR_MASK); 2975f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2976f8642885SMaxime Ripard MVNETA_CAUSE_PHY_STATUS_CHANGE | 2977f8642885SMaxime Ripard MVNETA_CAUSE_LINK_CHANGE | 2978f8642885SMaxime Ripard MVNETA_CAUSE_PSC_SYNC_CHANGE); 2979f8642885SMaxime Ripard netif_tx_start_all_queues(pp->dev); 2980f8642885SMaxime Ripard break; 2981f8642885SMaxime Ripard } 2982f8642885SMaxime Ripard 2983f8642885SMaxime Ripard return NOTIFY_OK; 2984f8642885SMaxime Ripard } 2985f8642885SMaxime Ripard 2986c5aff182SThomas Petazzoni static int mvneta_open(struct net_device *dev) 2987c5aff182SThomas Petazzoni { 2988c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 29892dcf75e2SGregory CLEMENT int ret, cpu; 2990c5aff182SThomas Petazzoni 2991c5aff182SThomas Petazzoni pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 29928ec2cd48Swilly tarreau pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 29938ec2cd48Swilly tarreau SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2994c5aff182SThomas Petazzoni 2995c5aff182SThomas Petazzoni ret = mvneta_setup_rxqs(pp); 2996c5aff182SThomas Petazzoni if (ret) 2997c5aff182SThomas Petazzoni return ret; 2998c5aff182SThomas Petazzoni 2999c5aff182SThomas Petazzoni ret = mvneta_setup_txqs(pp); 3000c5aff182SThomas Petazzoni if (ret) 3001c5aff182SThomas Petazzoni goto err_cleanup_rxqs; 3002c5aff182SThomas Petazzoni 3003c5aff182SThomas Petazzoni /* Connect to port interrupt line */ 300412bb03b4SMaxime Ripard ret = request_percpu_irq(pp->dev->irq, mvneta_isr, 300512bb03b4SMaxime Ripard MVNETA_DRIVER_NAME, pp->ports); 3006c5aff182SThomas Petazzoni if (ret) { 3007c5aff182SThomas Petazzoni netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 3008c5aff182SThomas Petazzoni goto err_cleanup_txqs; 3009c5aff182SThomas Petazzoni } 3010c5aff182SThomas Petazzoni 3011f8642885SMaxime Ripard /* Even though the documentation says that request_percpu_irq 3012f8642885SMaxime Ripard * doesn't enable the interrupts automatically, it actually 3013f8642885SMaxime Ripard * does so on the local CPU. 3014f8642885SMaxime Ripard * 3015f8642885SMaxime Ripard * Make sure it's disabled. 3016f8642885SMaxime Ripard */ 3017f8642885SMaxime Ripard mvneta_percpu_disable(pp); 3018f8642885SMaxime Ripard 30192dcf75e2SGregory CLEMENT /* Enable per-CPU interrupts on all the CPUs to handle our RX 30202dcf75e2SGregory CLEMENT * queue interrupts 30212dcf75e2SGregory CLEMENT */ 30222dcf75e2SGregory CLEMENT for_each_online_cpu(cpu) 30232dcf75e2SGregory CLEMENT smp_call_function_single(cpu, mvneta_percpu_enable, 30242dcf75e2SGregory CLEMENT pp, true); 30252dcf75e2SGregory CLEMENT 3026f8642885SMaxime Ripard 3027f8642885SMaxime Ripard /* Register a CPU notifier to handle the case where our CPU 3028f8642885SMaxime Ripard * might be taken offline.
3029f8642885SMaxime Ripard */ 3030f8642885SMaxime Ripard register_cpu_notifier(&pp->cpu_notifier); 3031f8642885SMaxime Ripard 3032c5aff182SThomas Petazzoni /* By default the link is down */ 3033c5aff182SThomas Petazzoni netif_carrier_off(pp->dev); 3034c5aff182SThomas Petazzoni 3035c5aff182SThomas Petazzoni ret = mvneta_mdio_probe(pp); 3036c5aff182SThomas Petazzoni if (ret < 0) { 3037c5aff182SThomas Petazzoni netdev_err(dev, "cannot probe MDIO bus\n"); 3038c5aff182SThomas Petazzoni goto err_free_irq; 3039c5aff182SThomas Petazzoni } 3040c5aff182SThomas Petazzoni 3041c5aff182SThomas Petazzoni mvneta_start_dev(pp); 3042c5aff182SThomas Petazzoni 3043c5aff182SThomas Petazzoni return 0; 3044c5aff182SThomas Petazzoni 3045c5aff182SThomas Petazzoni err_free_irq: 304612bb03b4SMaxime Ripard free_percpu_irq(pp->dev->irq, pp->ports); 3047c5aff182SThomas Petazzoni err_cleanup_txqs: 3048c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 3049c5aff182SThomas Petazzoni err_cleanup_rxqs: 3050c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 3051c5aff182SThomas Petazzoni return ret; 3052c5aff182SThomas Petazzoni } 3053c5aff182SThomas Petazzoni 3054c5aff182SThomas Petazzoni /* Stop the port, free port interrupt line */ 3055c5aff182SThomas Petazzoni static int mvneta_stop(struct net_device *dev) 3056c5aff182SThomas Petazzoni { 3057c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3058f8642885SMaxime Ripard int cpu; 3059c5aff182SThomas Petazzoni 3060c5aff182SThomas Petazzoni mvneta_stop_dev(pp); 3061c5aff182SThomas Petazzoni mvneta_mdio_remove(pp); 3062f8642885SMaxime Ripard unregister_cpu_notifier(&pp->cpu_notifier); 3063f8642885SMaxime Ripard for_each_present_cpu(cpu) 3064f8642885SMaxime Ripard smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 306512bb03b4SMaxime Ripard free_percpu_irq(dev->irq, pp->ports); 3066c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 3067c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 3068c5aff182SThomas Petazzoni 3069c5aff182SThomas Petazzoni return 0; 3070c5aff182SThomas Petazzoni } 3071c5aff182SThomas Petazzoni 307215f59456SThomas Petazzoni static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 307315f59456SThomas Petazzoni { 307415f59456SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 307515f59456SThomas Petazzoni 307615f59456SThomas Petazzoni if (!pp->phy_dev) 307715f59456SThomas Petazzoni return -ENOTSUPP; 307815f59456SThomas Petazzoni 3079ecf7b361SStas Sergeev return phy_mii_ioctl(pp->phy_dev, ifr, cmd); 308015f59456SThomas Petazzoni } 308115f59456SThomas Petazzoni 3082c5aff182SThomas Petazzoni /* Ethtool methods */ 3083c5aff182SThomas Petazzoni 3084c5aff182SThomas Petazzoni /* Get settings (phy address, speed) for ethtools */ 3085c5aff182SThomas Petazzoni int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3086c5aff182SThomas Petazzoni { 3087c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3088c5aff182SThomas Petazzoni 3089c5aff182SThomas Petazzoni if (!pp->phy_dev) 3090c5aff182SThomas Petazzoni return -ENODEV; 3091c5aff182SThomas Petazzoni 3092c5aff182SThomas Petazzoni return phy_ethtool_gset(pp->phy_dev, cmd); 3093c5aff182SThomas Petazzoni } 3094c5aff182SThomas Petazzoni 3095c5aff182SThomas Petazzoni /* Set settings (phy address, speed) for ethtools */ 3096c5aff182SThomas Petazzoni int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3097c5aff182SThomas Petazzoni { 3098c5aff182SThomas Petazzoni struct mvneta_port *pp =
netdev_priv(dev); 30990c0744fcSStas Sergeev struct phy_device *phydev = pp->phy_dev; 3100c5aff182SThomas Petazzoni 31010c0744fcSStas Sergeev if (!phydev) 3102c5aff182SThomas Petazzoni return -ENODEV; 3103c5aff182SThomas Petazzoni 31040c0744fcSStas Sergeev if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { 31050c0744fcSStas Sergeev u32 val; 31060c0744fcSStas Sergeev 31070c0744fcSStas Sergeev mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); 31080c0744fcSStas Sergeev 31090c0744fcSStas Sergeev if (cmd->autoneg == AUTONEG_DISABLE) { 31100c0744fcSStas Sergeev val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 31110c0744fcSStas Sergeev val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 31120c0744fcSStas Sergeev MVNETA_GMAC_CONFIG_GMII_SPEED | 31130c0744fcSStas Sergeev MVNETA_GMAC_CONFIG_FULL_DUPLEX); 31140c0744fcSStas Sergeev 31150c0744fcSStas Sergeev if (phydev->duplex) 31160c0744fcSStas Sergeev val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 31170c0744fcSStas Sergeev 31180c0744fcSStas Sergeev if (phydev->speed == SPEED_1000) 31190c0744fcSStas Sergeev val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 31200c0744fcSStas Sergeev else if (phydev->speed == SPEED_100) 31210c0744fcSStas Sergeev val |= MVNETA_GMAC_CONFIG_MII_SPEED; 31220c0744fcSStas Sergeev 31230c0744fcSStas Sergeev mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 31240c0744fcSStas Sergeev } 31250c0744fcSStas Sergeev 31260c0744fcSStas Sergeev pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); 31270c0744fcSStas Sergeev netdev_info(pp->dev, "autoneg status set to %i\n", 31280c0744fcSStas Sergeev pp->use_inband_status); 31290c0744fcSStas Sergeev 31300c0744fcSStas Sergeev if (netif_running(dev)) { 31310c0744fcSStas Sergeev mvneta_port_down(pp); 31320c0744fcSStas Sergeev mvneta_port_up(pp); 31330c0744fcSStas Sergeev } 31340c0744fcSStas Sergeev } 31350c0744fcSStas Sergeev 3136c5aff182SThomas Petazzoni return phy_ethtool_sset(pp->phy_dev, cmd); 3137c5aff182SThomas Petazzoni } 3138c5aff182SThomas Petazzoni 3139c5aff182SThomas Petazzoni /* Set interrupt coalescing for ethtools */ 3140c5aff182SThomas Petazzoni static int mvneta_ethtool_set_coalesce(struct net_device *dev, 3141c5aff182SThomas Petazzoni struct ethtool_coalesce *c) 3142c5aff182SThomas Petazzoni { 3143c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3144c5aff182SThomas Petazzoni int queue; 3145c5aff182SThomas Petazzoni 3146c5aff182SThomas Petazzoni for (queue = 0; queue < rxq_number; queue++) { 3147c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 3148c5aff182SThomas Petazzoni rxq->time_coal = c->rx_coalesce_usecs; 3149c5aff182SThomas Petazzoni rxq->pkts_coal = c->rx_max_coalesced_frames; 3150c5aff182SThomas Petazzoni mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 3151c5aff182SThomas Petazzoni mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 3152c5aff182SThomas Petazzoni } 3153c5aff182SThomas Petazzoni 3154c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 3155c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq = &pp->txqs[queue]; 3156c5aff182SThomas Petazzoni txq->done_pkts_coal = c->tx_max_coalesced_frames; 3157c5aff182SThomas Petazzoni mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 3158c5aff182SThomas Petazzoni } 3159c5aff182SThomas Petazzoni 3160c5aff182SThomas Petazzoni return 0; 3161c5aff182SThomas Petazzoni } 3162c5aff182SThomas Petazzoni 3163c5aff182SThomas Petazzoni /* get coalescing for ethtools */ 3164c5aff182SThomas Petazzoni static int mvneta_ethtool_get_coalesce(struct net_device *dev, 
3165c5aff182SThomas Petazzoni struct ethtool_coalesce *c) 3166c5aff182SThomas Petazzoni { 3167c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3168c5aff182SThomas Petazzoni 3169c5aff182SThomas Petazzoni c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 3170c5aff182SThomas Petazzoni c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 3171c5aff182SThomas Petazzoni 3172c5aff182SThomas Petazzoni c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 3173c5aff182SThomas Petazzoni return 0; 3174c5aff182SThomas Petazzoni } 3175c5aff182SThomas Petazzoni 3176c5aff182SThomas Petazzoni 3177c5aff182SThomas Petazzoni static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 3178c5aff182SThomas Petazzoni struct ethtool_drvinfo *drvinfo) 3179c5aff182SThomas Petazzoni { 3180c5aff182SThomas Petazzoni strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 3181c5aff182SThomas Petazzoni sizeof(drvinfo->driver)); 3182c5aff182SThomas Petazzoni strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 3183c5aff182SThomas Petazzoni sizeof(drvinfo->version)); 3184c5aff182SThomas Petazzoni strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 3185c5aff182SThomas Petazzoni sizeof(drvinfo->bus_info)); 3186c5aff182SThomas Petazzoni } 3187c5aff182SThomas Petazzoni 3188c5aff182SThomas Petazzoni 3189c5aff182SThomas Petazzoni static void mvneta_ethtool_get_ringparam(struct net_device *netdev, 3190c5aff182SThomas Petazzoni struct ethtool_ringparam *ring) 3191c5aff182SThomas Petazzoni { 3192c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(netdev); 3193c5aff182SThomas Petazzoni 3194c5aff182SThomas Petazzoni ring->rx_max_pending = MVNETA_MAX_RXD; 3195c5aff182SThomas Petazzoni ring->tx_max_pending = MVNETA_MAX_TXD; 3196c5aff182SThomas Petazzoni ring->rx_pending = pp->rx_ring_size; 3197c5aff182SThomas Petazzoni ring->tx_pending = pp->tx_ring_size; 3198c5aff182SThomas Petazzoni } 3199c5aff182SThomas Petazzoni 3200c5aff182SThomas Petazzoni static int mvneta_ethtool_set_ringparam(struct net_device *dev, 3201c5aff182SThomas Petazzoni struct ethtool_ringparam *ring) 3202c5aff182SThomas Petazzoni { 3203c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3204c5aff182SThomas Petazzoni 3205c5aff182SThomas Petazzoni if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 3206c5aff182SThomas Petazzoni return -EINVAL; 3207c5aff182SThomas Petazzoni pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
3208c5aff182SThomas Petazzoni ring->rx_pending : MVNETA_MAX_RXD; 32098eef5f97SEzequiel Garcia 32108eef5f97SEzequiel Garcia pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 32118eef5f97SEzequiel Garcia MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 32128eef5f97SEzequiel Garcia if (pp->tx_ring_size != ring->tx_pending) 32138eef5f97SEzequiel Garcia netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 32148eef5f97SEzequiel Garcia pp->tx_ring_size, ring->tx_pending); 3215c5aff182SThomas Petazzoni 3216c5aff182SThomas Petazzoni if (netif_running(dev)) { 3217c5aff182SThomas Petazzoni mvneta_stop(dev); 3218c5aff182SThomas Petazzoni if (mvneta_open(dev)) { 3219c5aff182SThomas Petazzoni netdev_err(dev, 3220c5aff182SThomas Petazzoni "error on opening device after ring param change\n"); 3221c5aff182SThomas Petazzoni return -ENOMEM; 3222c5aff182SThomas Petazzoni } 3223c5aff182SThomas Petazzoni } 3224c5aff182SThomas Petazzoni 3225c5aff182SThomas Petazzoni return 0; 3226c5aff182SThomas Petazzoni } 3227c5aff182SThomas Petazzoni 32289b0cdefaSRussell King static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 32299b0cdefaSRussell King u8 *data) 32309b0cdefaSRussell King { 32319b0cdefaSRussell King if (sset == ETH_SS_STATS) { 32329b0cdefaSRussell King int i; 32339b0cdefaSRussell King 32349b0cdefaSRussell King for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 32359b0cdefaSRussell King memcpy(data + i * ETH_GSTRING_LEN, 32369b0cdefaSRussell King mvneta_statistics[i].name, ETH_GSTRING_LEN); 32379b0cdefaSRussell King } 32389b0cdefaSRussell King } 32399b0cdefaSRussell King 32409b0cdefaSRussell King static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 32419b0cdefaSRussell King { 32429b0cdefaSRussell King const struct mvneta_statistic *s; 32439b0cdefaSRussell King void __iomem *base = pp->base; 32449b0cdefaSRussell King u32 high, low; u64 val; 32459b0cdefaSRussell King int i; 32469b0cdefaSRussell King 32479b0cdefaSRussell King for (i = 0, s = mvneta_statistics; 32489b0cdefaSRussell King s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 32499b0cdefaSRussell King s++, i++) { 32509b0cdefaSRussell King val = 0; 32519b0cdefaSRussell King 32529b0cdefaSRussell King switch (s->type) { 32539b0cdefaSRussell King case T_REG_32: 32549b0cdefaSRussell King val = readl_relaxed(base + s->offset); 32559b0cdefaSRussell King break; 32569b0cdefaSRussell King case T_REG_64: 32579b0cdefaSRussell King /* Docs say to read low 32-bit then high */ 32589b0cdefaSRussell King low = readl_relaxed(base + s->offset); 32599b0cdefaSRussell King high = readl_relaxed(base + s->offset + 4); 32609b0cdefaSRussell King val = (u64)high << 32 | low; 32619b0cdefaSRussell King break; 32629b0cdefaSRussell King } 32639b0cdefaSRussell King 32649b0cdefaSRussell King pp->ethtool_stats[i] += val; 32659b0cdefaSRussell King } 32669b0cdefaSRussell King } 32679b0cdefaSRussell King 32689b0cdefaSRussell King static void mvneta_ethtool_get_stats(struct net_device *dev, 32699b0cdefaSRussell King struct ethtool_stats *stats, u64 *data) 32709b0cdefaSRussell King { 32719b0cdefaSRussell King struct mvneta_port *pp = netdev_priv(dev); 32729b0cdefaSRussell King int i; 32739b0cdefaSRussell King 32749b0cdefaSRussell King mvneta_ethtool_update_stats(pp); 32759b0cdefaSRussell King 32769b0cdefaSRussell King for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 32779b0cdefaSRussell King *data++ = pp->ethtool_stats[i]; 32789b0cdefaSRussell King } 32799b0cdefaSRussell King 32809b0cdefaSRussell King static int mvneta_ethtool_get_sset_count(struct
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	case ETHTOOL_GRXFH:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
					 pp, true);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_synchronize(&pcpu_port->napi);
		napi_disable(&pcpu_port->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update the portCfg register so that all RX queue types use the
	 * new default RXQ
	 */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	mvneta_percpu_elect(pp);

	/* Re-enable NAPI on each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_enable(&pcpu_port->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
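/*
 * mvneta_config_rss() follows the usual quiesce/update/resume pattern:
 * stop TX, mask the per-CPU interrupts, disable NAPI everywhere, retarget
 * rxq_def, then bring everything back up. From userspace the path is
 * reached through the RSS indirection table, e.g. (queue number arbitrary,
 * shown for illustration):
 *
 *   ethtool -X eth0 weight 0 1    # steer all RSS traffic to RX queue 1
 */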
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};
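/*
 * ethtool entry points. A quick smoke test of the ops table below from the
 * shell (interface name illustrative):
 *
 *   ethtool -i eth0    # get_drvinfo
 *   ethtool -S eth0    # get_sset_count/get_strings/get_ethtool_stats
 *   ethtool -x eth0    # get_rxfh (RSS indirection table)
 */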
const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link            = ethtool_op_get_link,
	.get_settings        = mvneta_ethtool_get_settings,
	.set_settings        = mvneta_ethtool_set_settings,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
};

/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}
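/*
 * The per-queue defaults set above (MVNETA_RX_COAL_PKTS/USEC,
 * MVNETA_TXDONE_COAL_PKTS) are what mvneta_ethtool_get_coalesce() reports
 * until userspace overrides them, e.g. (values arbitrary, for illustration):
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32
 */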
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}
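/*
 * Probe-time resources below are acquired in the order netdev -> IRQ ->
 * PHY node -> clock -> MMIO -> per-CPU data, and the error labels at the
 * end of mvneta_probe() unwind them in the reverse order; keeping the two
 * lists in sync is what makes the goto-based cleanup correct.
 */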
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	const char *managed;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		if (!of_phy_is_fixed_link(dn)) {
			dev_err(&pdev->dev, "no PHY specified\n");
			err = -ENODEV;
			goto err_free_irq;
		}

		err = of_phy_register_fixed_link(dn);
		if (err < 0) {
			dev_err(&pdev->dev, "cannot register fixed PHY\n");
			goto err_free_irq;
		}

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		phy_node = of_node_get(dn);
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_put_phy_node;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	err = of_property_read_string(dn, "managed", &managed);
	pp->use_inband_status = (err == 0 &&
				 strcmp(managed, "in-band-status") == 0);
	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

	pp->rxq_def = rxq_def;

	pp->indir[0] = rxq_def;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_put_phy_node;
	}

	clk_prepare_enable(pp->clk);

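	/* The "managed" property parsed above is a standard DT ethernet
	 * binding; with in-band status the MAC takes link state from the
	 * SGMII in-band signaling instead of polling the PHY. An
	 * illustrative (not board-specific) DT fragment:
	 *
	 *   &eth0 {
	 *           phy-mode = "sgmii";
	 *           managed = "in-band-status";
	 *   };
	 */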
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_free_stats;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_free_stats;
	}

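	/* MAC address precedence above: the DT "mac-address"/
	 * "local-mac-address" properties win, then whatever the bootloader
	 * left in the MAC registers, then a random address. An illustrative
	 * DT override (bytes arbitrary):
	 *
	 *   local-mac-address = [00 50 43 12 34 56];
	 */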
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
		port->pp = pp;
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	if (pp->use_inband_status) {
		struct phy_device *phy = of_phy_find_device(dn);

		mvneta_fixed_link_update(pp, phy);

		put_device(&phy->mdio.dev);
	}

	return 0;

err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk);
err_put_phy_node:
	of_node_put(phy_node);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	of_node_put(pp->phy_node);
	free_netdev(dev);

	return 0;
}
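/*
 * OF match table. An illustrative, simplified board DT node that would
 * bind against it -- register range, IRQ and clock specifier are made up
 * for the example:
 *
 *   ethernet@70000 {
 *           compatible = "marvell,armada-370-neta";
 *           reg = <0x70000 0x4000>;
 *           interrupts = <8>;
 *           clocks = <&gateclk 4>;
 *           phy = <&phy0>;
 *           phy-mode = "rgmii-id";
 *   };
 */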
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
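/*
 * All four module parameters are readable under
 * /sys/module/mvneta/parameters/; rx_copybreak (S_IWUSR) is also writable
 * by root at runtime. Illustrative usage, values arbitrary:
 *
 *   modprobe mvneta rxq_def=1
 *   echo 128 > /sys/module/mvneta/parameters/rx_copybreak
 */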