/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC			BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT		4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK		0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT		6
#define MVNETA_RXQ_LONG_POOL_ID_MASK		0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK		(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET			0x1cc0
#define MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_AC5_CNM_DDR_TARGET		0x2
#define MVNETA_AC5_CNM_DDR_ATTR			0xb
#define MVNETA_ACCESS_PROTECT_ENABLE		0x2294
#define MVNETA_PORT_CONFIG			0x2400
#define MVNETA_UNI_PROMISC_MODE			BIT(0)
#define MVNETA_DEF_RXQ(q)			((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM			BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)			((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR		BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q) | \
						 MVNETA_DEF_RXQ_ARP(q) | \
						 MVNETA_DEF_RXQ_TCP(q) | \
						 MVNETA_DEF_RXQ_UDP(q) | \
						 MVNETA_DEF_RXQ_BPDU(q) | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define MVNETA_SDMA_BRST_SIZE_16		4
#define MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP			BIT(4)
#define MVNETA_TX_NO_DATA_SWAP			BIT(5)
#define MVNETA_DESC_SWAP			BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_VLAN_PRIO_TO_RXQ			0x2440
#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq)	((rxq) << ((prio) * 3))
#define MVNETA_PORT_STATUS			0x2444
#define MVNETA_TX_IN_PRGRS			BIT(0)
#define MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG			0x24A0
#define MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_HSGMII_SERDES_PROTO		0x1107
#define MVNETA_TYPE_PRIO			0x24bc
#define MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define MVNETA_TXQ_DISABLE_SHIFT		8
#define MVNETA_TXQ_ENABLE_MASK			0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT		0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER		0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE		BIT(31)
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_BM_ADDRESS			0x2504
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK		0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK		0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)		BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)		BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then reads of the register from this CPU always return 0
 * and writes have no effect.
 */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL			(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL			(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK		BIT(31)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE		BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE		BIT(1)
#define MVNETA_CAUSE_PTP			BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR		BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN			BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR		BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT		BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN			BIT(11)
#define MVNETA_CAUSE_PRBS_ERR			BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE		BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR		BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT		24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK		(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)		(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE			0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK		0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK		0x000000ff

#define MVNETA_RXQ_CMD				0x2680
#define MVNETA_RXQ_DISABLE_SHIFT		8
#define MVNETA_RXQ_ENABLE_MASK			0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X		BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE		BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE			BIT(3)
#define MVNETA_GMAC2_PORT_RGMII			BIT(4)
#define MVNETA_GMAC2_PORT_RESET			BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define MVNETA_GMAC_LINK_UP			BIT(0)
#define MVNETA_GMAC_SPEED_1000			BIT(1)
#define MVNETA_GMAC_SPEED_100			BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX			BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE		BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE		BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE		BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE		BIT(7)
#define MVNETA_GMAC_AN_COMPLETE			BIT(11)
#define MVNETA_GMAC_SYNC_OK			BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE		BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE		BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN		BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN			BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL		BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL	BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN		BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_GMAC_CTRL_4			0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE	BIT(1)
#define MVNETA_MIB_COUNTERS_BASE		0x3000
#define MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK		0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_DEC_SENT_MASK		0xff
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT		16
#define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TXQ_CMD1_REG			0x3e00
#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1		BIT(3)
#define MVNETA_TXQ_CMD1_BW_LIM_EN		BIT(0)
#define MVNETA_REFILL_NUM_CLK_REG		0x3e08
#define MVNETA_REFILL_MAX_NUM_CLK		0x0000ffff
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		(0x3e20 + ((q) << 2))
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	0x0007ffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* The values of the bucket refill base period and refill period are taken
 * from the reference manual, and add up to a base resolution of 10Kbps.
 * This allows covering all rate-limit values from 10Kbps up to 5Gbps.
 */

/* Base period for the rate limit algorithm */
#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100

/* Number of Base Periods to wait between each bucket refill */
#define MVNETA_TXQ_BUCKET_REFILL_PERIOD		1000

/* The base resolution for rate limiting, in bps. Any max_rate value should be
 * a multiple of that value.
 */
#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION	(NSEC_PER_SEC / \
						 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
						  MVNETA_TXQ_BUCKET_REFILL_PERIOD))

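/* Worked example of the resolution above: NSEC_PER_SEC /
 * (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * MVNETA_TXQ_BUCKET_REFILL_PERIOD)
 * = 1000000000 / (100 * 1000) = 10000 bps, i.e. the 10Kbps base resolution
 * mentioned in the comment above.
 */
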
#define MVNETA_LPI_CTRL_0			0x2cc0
#define MVNETA_LPI_CTRL_1			0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE		BIT(0)
#define MVNETA_LPI_CTRL_2			0x2cc8
#define MVNETA_LPI_STATUS			0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* The size of a TSO header page */
#define MVNETA_TSO_PAGE_SIZE		(2 * PAGE_SIZE)

/* Number of TSO headers per page. This should be a power of 2 */
#define MVNETA_TSO_PER_PAGE		(MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)

/* Maximum number of TSO header pages */
#define MVNETA_MAX_TSO_PAGES		(MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)

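/* A note on the arithmetic above: MVNETA_MAX_TSO_PAGES is MVNETA_MAX_TXD /
 * MVNETA_TSO_PER_PAGE, so the TSO header pages collectively provide exactly
 * MVNETA_MAX_TXD (1024) header slots, i.e. one header slot per TX descriptor
 * of a maximally-sized ring.
 */
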
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION		64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD		(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
				 MVNETA_SKB_HEADROOM))
#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1

#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};

struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
	u64	xdp_xmit;
	u64	xdp_xmit_err;
	u64	xdp_tx;
	u64	xdp_tx_err;
};

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64	skb_alloc_error;
	u64	refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	u64	rx_dropped;
	u64	rx_errors;
};

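/* A sketch of how these per-CPU counters are meant to be read from the slow
 * path (this mirrors mvneta_get_stats64() further down; "stats" here is one
 * per-CPU instance obtained with per_cpu_ptr()):
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		packets = stats->es.ps.rx_packets;
 *		bytes   = stats->es.ps.rx_bytes;
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */
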
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

enum {
	__MVNETA_DOWN,
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	unsigned long state;

	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	struct bpf_prog *xdp_prog;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct phylink_pcs phylink_pcs;
	struct phy *comphy;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	bool neta_ac5;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

enum mvneta_tx_buf_type {
	MVNETA_TYPE_TSO,
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs[MVNETA_MAX_TSO_PAGES];

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES];

	/* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

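/* Note: the MIB counter block is clear-on-read, which is why the pass of
 * dummy reads in mvneta_mib_counters_clear() above is enough to zero every
 * counter; nothing has to be written back to the registers.
 */
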
/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes   = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors  = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes   = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors  += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->tx_dropped = dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
	       MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

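/* Worked example for mvneta_rxq_non_occup_desc_add(): refilling 300
 * descriptors results in two writes to MVNETA_RXQ_STATUS_UPDATE_REG,
 * first 255 and then the remaining 45, because the register field only
 * holds values up to MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255).
 */
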
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

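/* Example of the ring walk above (assuming last_desc == size - 1, e.g. 511
 * for a 512-entry ring): next_desc_to_proc advances 0, 1, ..., 511 and then
 * wraps back to 0 via MVNETA_QUEUE_NEXT_DESC; the prefetch() warms the cache
 * line of the descriptor that the following call will return.
 */
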
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
9926a20c175SThomas Petazzoni */ 993c5aff182SThomas Petazzoni static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) 994c5aff182SThomas Petazzoni { 995c5aff182SThomas Petazzoni if (txq->next_desc_to_proc == 0) 996c5aff182SThomas Petazzoni txq->next_desc_to_proc = txq->last_desc - 1; 997c5aff182SThomas Petazzoni else 998c5aff182SThomas Petazzoni txq->next_desc_to_proc--; 999c5aff182SThomas Petazzoni } 1000c5aff182SThomas Petazzoni 1001c5aff182SThomas Petazzoni /* Set rxq buf size */ 1002c5aff182SThomas Petazzoni static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, 1003c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq, 1004c5aff182SThomas Petazzoni int buf_size) 1005c5aff182SThomas Petazzoni { 1006c5aff182SThomas Petazzoni u32 val; 1007c5aff182SThomas Petazzoni 1008c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); 1009c5aff182SThomas Petazzoni 1010c5aff182SThomas Petazzoni val &= ~MVNETA_RXQ_BUF_SIZE_MASK; 1011c5aff182SThomas Petazzoni val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); 1012c5aff182SThomas Petazzoni 1013c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); 1014c5aff182SThomas Petazzoni } 1015c5aff182SThomas Petazzoni 1016c5aff182SThomas Petazzoni /* Disable buffer management (BM) */ 1017c5aff182SThomas Petazzoni static void mvneta_rxq_bm_disable(struct mvneta_port *pp, 1018c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 1019c5aff182SThomas Petazzoni { 1020c5aff182SThomas Petazzoni u32 val; 1021c5aff182SThomas Petazzoni 1022c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1023c5aff182SThomas Petazzoni val &= ~MVNETA_RXQ_HW_BUF_ALLOC; 1024c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1025c5aff182SThomas Petazzoni } 1026c5aff182SThomas Petazzoni 1027dc35a10fSMarcin Wojtas /* Enable buffer management (BM) */ 1028dc35a10fSMarcin Wojtas static void mvneta_rxq_bm_enable(struct mvneta_port *pp, 1029dc35a10fSMarcin Wojtas struct mvneta_rx_queue *rxq) 1030dc35a10fSMarcin Wojtas { 1031dc35a10fSMarcin Wojtas u32 val; 1032dc35a10fSMarcin Wojtas 1033dc35a10fSMarcin Wojtas val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1034dc35a10fSMarcin Wojtas val |= MVNETA_RXQ_HW_BUF_ALLOC; 1035dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1036dc35a10fSMarcin Wojtas } 1037dc35a10fSMarcin Wojtas 1038dc35a10fSMarcin Wojtas /* Notify HW about port's assignment of pool for bigger packets */ 1039dc35a10fSMarcin Wojtas static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, 1040dc35a10fSMarcin Wojtas struct mvneta_rx_queue *rxq) 1041dc35a10fSMarcin Wojtas { 1042dc35a10fSMarcin Wojtas u32 val; 1043dc35a10fSMarcin Wojtas 1044dc35a10fSMarcin Wojtas val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1045dc35a10fSMarcin Wojtas val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; 1046dc35a10fSMarcin Wojtas val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); 1047dc35a10fSMarcin Wojtas 1048dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1049dc35a10fSMarcin Wojtas } 1050dc35a10fSMarcin Wojtas 1051dc35a10fSMarcin Wojtas /* Notify HW about port's assignment of pool for smaller packets */ 1052dc35a10fSMarcin Wojtas static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, 1053dc35a10fSMarcin Wojtas struct mvneta_rx_queue *rxq) 1054dc35a10fSMarcin Wojtas { 1055dc35a10fSMarcin Wojtas u32 val; 1056dc35a10fSMarcin Wojtas 1057dc35a10fSMarcin Wojtas val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 
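/* As with the long-pool helper above, this is a read-modify-write that
 * only touches the short-pool id field of the RXQ config register; the
 * port's "short" BM pool serves small frames while the "long" pool is
 * sized from the MTU (see mvneta_bm_port_init() below).
 */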
1058dc35a10fSMarcin Wojtas val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; 1059dc35a10fSMarcin Wojtas val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); 1060dc35a10fSMarcin Wojtas 1061dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1062dc35a10fSMarcin Wojtas } 1063dc35a10fSMarcin Wojtas 1064dc35a10fSMarcin Wojtas /* Set port's receive buffer size for assigned BM pool */ 1065dc35a10fSMarcin Wojtas static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, 1066dc35a10fSMarcin Wojtas int buf_size, 1067dc35a10fSMarcin Wojtas u8 pool_id) 1068dc35a10fSMarcin Wojtas { 1069dc35a10fSMarcin Wojtas u32 val; 1070dc35a10fSMarcin Wojtas 1071dc35a10fSMarcin Wojtas if (!IS_ALIGNED(buf_size, 8)) { 1072dc35a10fSMarcin Wojtas dev_warn(pp->dev->dev.parent, 1073dc35a10fSMarcin Wojtas "illegal buf_size value %d, round to %d\n", 1074dc35a10fSMarcin Wojtas buf_size, ALIGN(buf_size, 8)); 1075dc35a10fSMarcin Wojtas buf_size = ALIGN(buf_size, 8); 1076dc35a10fSMarcin Wojtas } 1077dc35a10fSMarcin Wojtas 1078dc35a10fSMarcin Wojtas val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); 1079dc35a10fSMarcin Wojtas val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; 1080dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); 1081dc35a10fSMarcin Wojtas } 1082dc35a10fSMarcin Wojtas 1083dc35a10fSMarcin Wojtas /* Configure MBUS window in order to enable access BM internal SRAM */ 1084dc35a10fSMarcin Wojtas static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, 1085dc35a10fSMarcin Wojtas u8 target, u8 attr) 1086dc35a10fSMarcin Wojtas { 1087dc35a10fSMarcin Wojtas u32 win_enable, win_protect; 1088dc35a10fSMarcin Wojtas int i; 1089dc35a10fSMarcin Wojtas 1090dc35a10fSMarcin Wojtas win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); 1091dc35a10fSMarcin Wojtas 1092dc35a10fSMarcin Wojtas if (pp->bm_win_id < 0) { 1093dc35a10fSMarcin Wojtas /* Find first not occupied window */ 1094dc35a10fSMarcin Wojtas for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { 1095dc35a10fSMarcin Wojtas if (win_enable & (1 << i)) { 1096dc35a10fSMarcin Wojtas pp->bm_win_id = i; 1097dc35a10fSMarcin Wojtas break; 1098dc35a10fSMarcin Wojtas } 1099dc35a10fSMarcin Wojtas } 1100dc35a10fSMarcin Wojtas if (i == MVNETA_MAX_DECODE_WIN) 1101dc35a10fSMarcin Wojtas return -ENOMEM; 1102dc35a10fSMarcin Wojtas } else { 1103dc35a10fSMarcin Wojtas i = pp->bm_win_id; 1104dc35a10fSMarcin Wojtas } 1105dc35a10fSMarcin Wojtas 1106dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 1107dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 1108dc35a10fSMarcin Wojtas 1109dc35a10fSMarcin Wojtas if (i < 4) 1110dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 1111dc35a10fSMarcin Wojtas 1112dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | 1113dc35a10fSMarcin Wojtas (attr << 8) | target); 1114dc35a10fSMarcin Wojtas 1115dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); 1116dc35a10fSMarcin Wojtas 1117dc35a10fSMarcin Wojtas win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); 1118dc35a10fSMarcin Wojtas win_protect |= 3 << (2 * i); 1119dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 1120dc35a10fSMarcin Wojtas 1121dc35a10fSMarcin Wojtas win_enable &= ~(1 << i); 1122dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 1123dc35a10fSMarcin Wojtas 1124dc35a10fSMarcin Wojtas return 0; 1125dc35a10fSMarcin Wojtas } 1126dc35a10fSMarcin 
Wojtas 11272636ac3cSMarcin Wojtas static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) 1128dc35a10fSMarcin Wojtas { 11292636ac3cSMarcin Wojtas u32 wsize; 1130dc35a10fSMarcin Wojtas u8 target, attr; 1131dc35a10fSMarcin Wojtas int err; 1132dc35a10fSMarcin Wojtas 1133dc35a10fSMarcin Wojtas /* Get BM window information */ 1134dc35a10fSMarcin Wojtas err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, 1135dc35a10fSMarcin Wojtas &target, &attr); 1136dc35a10fSMarcin Wojtas if (err < 0) 1137dc35a10fSMarcin Wojtas return err; 1138dc35a10fSMarcin Wojtas 1139dc35a10fSMarcin Wojtas pp->bm_win_id = -1; 1140dc35a10fSMarcin Wojtas 1141dc35a10fSMarcin Wojtas /* Open NETA -> BM window */ 1142dc35a10fSMarcin Wojtas err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, 1143dc35a10fSMarcin Wojtas target, attr); 1144dc35a10fSMarcin Wojtas if (err < 0) { 1145dc35a10fSMarcin Wojtas netdev_info(pp->dev, "fail to configure mbus window to BM\n"); 1146dc35a10fSMarcin Wojtas return err; 1147dc35a10fSMarcin Wojtas } 11482636ac3cSMarcin Wojtas return 0; 11492636ac3cSMarcin Wojtas } 11502636ac3cSMarcin Wojtas 11512636ac3cSMarcin Wojtas /* Assign and initialize pools for port. In case of fail 11522636ac3cSMarcin Wojtas * buffer manager will remain disabled for current port. 11532636ac3cSMarcin Wojtas */ 11542636ac3cSMarcin Wojtas static int mvneta_bm_port_init(struct platform_device *pdev, 11552636ac3cSMarcin Wojtas struct mvneta_port *pp) 11562636ac3cSMarcin Wojtas { 11572636ac3cSMarcin Wojtas struct device_node *dn = pdev->dev.of_node; 11582636ac3cSMarcin Wojtas u32 long_pool_id, short_pool_id; 11592636ac3cSMarcin Wojtas 11602636ac3cSMarcin Wojtas if (!pp->neta_armada3700) { 11612636ac3cSMarcin Wojtas int ret; 11622636ac3cSMarcin Wojtas 11632636ac3cSMarcin Wojtas ret = mvneta_bm_port_mbus_init(pp); 11642636ac3cSMarcin Wojtas if (ret) 11652636ac3cSMarcin Wojtas return ret; 11662636ac3cSMarcin Wojtas } 1167dc35a10fSMarcin Wojtas 1168dc35a10fSMarcin Wojtas if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { 1169dc35a10fSMarcin Wojtas netdev_info(pp->dev, "missing long pool id\n"); 1170dc35a10fSMarcin Wojtas return -EINVAL; 1171dc35a10fSMarcin Wojtas } 1172dc35a10fSMarcin Wojtas 1173dc35a10fSMarcin Wojtas /* Create port's long pool depending on mtu */ 1174dc35a10fSMarcin Wojtas pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, 1175dc35a10fSMarcin Wojtas MVNETA_BM_LONG, pp->id, 1176dc35a10fSMarcin Wojtas MVNETA_RX_PKT_SIZE(pp->dev->mtu)); 1177dc35a10fSMarcin Wojtas if (!pp->pool_long) { 1178dc35a10fSMarcin Wojtas netdev_info(pp->dev, "fail to obtain long pool for port\n"); 1179dc35a10fSMarcin Wojtas return -ENOMEM; 1180dc35a10fSMarcin Wojtas } 1181dc35a10fSMarcin Wojtas 1182dc35a10fSMarcin Wojtas pp->pool_long->port_map |= 1 << pp->id; 1183dc35a10fSMarcin Wojtas 1184dc35a10fSMarcin Wojtas mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, 1185dc35a10fSMarcin Wojtas pp->pool_long->id); 1186dc35a10fSMarcin Wojtas 1187dc35a10fSMarcin Wojtas /* If short pool id is not defined, assume using single pool */ 1188dc35a10fSMarcin Wojtas if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) 1189dc35a10fSMarcin Wojtas short_pool_id = long_pool_id; 1190dc35a10fSMarcin Wojtas 1191dc35a10fSMarcin Wojtas /* Create port's short pool */ 1192dc35a10fSMarcin Wojtas pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, 1193dc35a10fSMarcin Wojtas MVNETA_BM_SHORT, pp->id, 1194dc35a10fSMarcin Wojtas MVNETA_BM_SHORT_PKT_SIZE); 1195dc35a10fSMarcin Wojtas if 
(!pp->pool_short) { 1196dc35a10fSMarcin Wojtas netdev_info(pp->dev, "fail to obtain short pool for port\n"); 1197dc35a10fSMarcin Wojtas mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1198dc35a10fSMarcin Wojtas return -ENOMEM; 1199dc35a10fSMarcin Wojtas } 1200dc35a10fSMarcin Wojtas 1201dc35a10fSMarcin Wojtas if (short_pool_id != long_pool_id) { 1202dc35a10fSMarcin Wojtas pp->pool_short->port_map |= 1 << pp->id; 1203dc35a10fSMarcin Wojtas mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, 1204dc35a10fSMarcin Wojtas pp->pool_short->id); 1205dc35a10fSMarcin Wojtas } 1206dc35a10fSMarcin Wojtas 1207dc35a10fSMarcin Wojtas return 0; 1208dc35a10fSMarcin Wojtas } 1209dc35a10fSMarcin Wojtas 1210dc35a10fSMarcin Wojtas /* Update settings of a pool for bigger packets */ 1211dc35a10fSMarcin Wojtas static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) 1212dc35a10fSMarcin Wojtas { 1213dc35a10fSMarcin Wojtas struct mvneta_bm_pool *bm_pool = pp->pool_long; 1214baa11ebcSGregory CLEMENT struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; 1215dc35a10fSMarcin Wojtas int num; 1216dc35a10fSMarcin Wojtas 1217dc35a10fSMarcin Wojtas /* Release all buffers from long pool */ 1218dc35a10fSMarcin Wojtas mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); 1219baa11ebcSGregory CLEMENT if (hwbm_pool->buf_num) { 1220dc35a10fSMarcin Wojtas WARN(1, "cannot free all buffers in pool %d\n", 1221dc35a10fSMarcin Wojtas bm_pool->id); 1222dc35a10fSMarcin Wojtas goto bm_mtu_err; 1223dc35a10fSMarcin Wojtas } 1224dc35a10fSMarcin Wojtas 1225dc35a10fSMarcin Wojtas bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); 1226dc35a10fSMarcin Wojtas bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); 1227baa11ebcSGregory CLEMENT hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1228dc35a10fSMarcin Wojtas SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); 1229dc35a10fSMarcin Wojtas 1230dc35a10fSMarcin Wojtas /* Fill entire long pool */ 12316dcdd884SSebastian Andrzej Siewior num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); 1232baa11ebcSGregory CLEMENT if (num != hwbm_pool->size) { 1233dc35a10fSMarcin Wojtas WARN(1, "pool %d: %d of %d allocated\n", 1234baa11ebcSGregory CLEMENT bm_pool->id, num, hwbm_pool->size); 1235dc35a10fSMarcin Wojtas goto bm_mtu_err; 1236dc35a10fSMarcin Wojtas } 1237dc35a10fSMarcin Wojtas mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); 1238dc35a10fSMarcin Wojtas 1239dc35a10fSMarcin Wojtas return; 1240dc35a10fSMarcin Wojtas 1241dc35a10fSMarcin Wojtas bm_mtu_err: 1242dc35a10fSMarcin Wojtas mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1243dc35a10fSMarcin Wojtas mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); 1244dc35a10fSMarcin Wojtas 1245dc35a10fSMarcin Wojtas pp->bm_priv = NULL; 124644efc78dSLorenzo Bianconi pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 1247dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); 1248dc35a10fSMarcin Wojtas netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); 1249dc35a10fSMarcin Wojtas } 1250dc35a10fSMarcin Wojtas 1251c5aff182SThomas Petazzoni /* Start the Ethernet port RX and TX activity */ 1252c5aff182SThomas Petazzoni static void mvneta_port_up(struct mvneta_port *pp) 1253c5aff182SThomas Petazzoni { 1254c5aff182SThomas Petazzoni int queue; 1255c5aff182SThomas Petazzoni u32 q_map; 1256c5aff182SThomas Petazzoni 1257c5aff182SThomas Petazzoni /* Enable all initialized TXs. 
*/ 1258c5aff182SThomas Petazzoni q_map = 0; 1259c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 1260c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq = &pp->txqs[queue]; 1261f95936ccSMarkus Elfring if (txq->descs) 1262c5aff182SThomas Petazzoni q_map |= (1 << queue); 1263c5aff182SThomas Petazzoni } 1264c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_CMD, q_map); 1265c5aff182SThomas Petazzoni 1266e81b5e01SYelena Krivosheev q_map = 0; 1267c5aff182SThomas Petazzoni /* Enable all initialized RXQs. */ 12682dcf75e2SGregory CLEMENT for (queue = 0; queue < rxq_number; queue++) { 12692dcf75e2SGregory CLEMENT struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 12702dcf75e2SGregory CLEMENT 1271f95936ccSMarkus Elfring if (rxq->descs) 12722dcf75e2SGregory CLEMENT q_map |= (1 << queue); 12732dcf75e2SGregory CLEMENT } 12742dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_RXQ_CMD, q_map); 1275c5aff182SThomas Petazzoni } 1276c5aff182SThomas Petazzoni 1277c5aff182SThomas Petazzoni /* Stop the Ethernet port activity */ 1278c5aff182SThomas Petazzoni static void mvneta_port_down(struct mvneta_port *pp) 1279c5aff182SThomas Petazzoni { 1280c5aff182SThomas Petazzoni u32 val; 1281c5aff182SThomas Petazzoni int count; 1282c5aff182SThomas Petazzoni 1283c5aff182SThomas Petazzoni /* Stop Rx port activity. Check port Rx activity. */ 1284c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; 1285c5aff182SThomas Petazzoni 1286c5aff182SThomas Petazzoni /* Issue stop command for active channels only */ 1287c5aff182SThomas Petazzoni if (val != 0) 1288c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_CMD, 1289c5aff182SThomas Petazzoni val << MVNETA_RXQ_DISABLE_SHIFT); 1290c5aff182SThomas Petazzoni 1291c5aff182SThomas Petazzoni /* Wait for all Rx activity to terminate. */ 1292c5aff182SThomas Petazzoni count = 0; 1293c5aff182SThomas Petazzoni do { 1294c5aff182SThomas Petazzoni if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { 1295c5aff182SThomas Petazzoni netdev_warn(pp->dev, 12960838abb3SDmitri Epshtein "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", 1297c5aff182SThomas Petazzoni val); 1298c5aff182SThomas Petazzoni break; 1299c5aff182SThomas Petazzoni } 1300c5aff182SThomas Petazzoni mdelay(1); 1301c5aff182SThomas Petazzoni 1302c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_RXQ_CMD); 1303a3703fb3SDmitri Epshtein } while (val & MVNETA_RXQ_ENABLE_MASK); 1304c5aff182SThomas Petazzoni 1305c5aff182SThomas Petazzoni /* Stop Tx port activity. Check port Tx activity. Issue stop 13066a20c175SThomas Petazzoni * command for active channels only 13076a20c175SThomas Petazzoni */ 1308c5aff182SThomas Petazzoni val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; 1309c5aff182SThomas Petazzoni 1310c5aff182SThomas Petazzoni if (val != 0) 1311c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_CMD, 1312c5aff182SThomas Petazzoni (val << MVNETA_TXQ_DISABLE_SHIFT)); 1313c5aff182SThomas Petazzoni 1314c5aff182SThomas Petazzoni /* Wait for all Tx activity to terminate. 
*/ 1315c5aff182SThomas Petazzoni count = 0; 1316c5aff182SThomas Petazzoni do { 1317c5aff182SThomas Petazzoni if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { 1318c5aff182SThomas Petazzoni netdev_warn(pp->dev, 1319c5aff182SThomas Petazzoni "TIMEOUT for TX stopped status=0x%08x\n", 1320c5aff182SThomas Petazzoni val); 1321c5aff182SThomas Petazzoni break; 1322c5aff182SThomas Petazzoni } 1323c5aff182SThomas Petazzoni mdelay(1); 1324c5aff182SThomas Petazzoni 1325c5aff182SThomas Petazzoni /* Check TX Command reg that all Txqs are stopped */ 1326c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_CMD); 1327c5aff182SThomas Petazzoni 1328a3703fb3SDmitri Epshtein } while (val & MVNETA_TXQ_ENABLE_MASK); 1329c5aff182SThomas Petazzoni 1330c5aff182SThomas Petazzoni /* Double check to verify that TX FIFO is empty */ 1331c5aff182SThomas Petazzoni count = 0; 1332c5aff182SThomas Petazzoni do { 1333c5aff182SThomas Petazzoni if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { 1334c5aff182SThomas Petazzoni netdev_warn(pp->dev, 13350838abb3SDmitri Epshtein "TX FIFO empty timeout status=0x%08x\n", 1336c5aff182SThomas Petazzoni val); 1337c5aff182SThomas Petazzoni break; 1338c5aff182SThomas Petazzoni } 1339c5aff182SThomas Petazzoni mdelay(1); 1340c5aff182SThomas Petazzoni 1341c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_PORT_STATUS); 1342c5aff182SThomas Petazzoni } while (!(val & MVNETA_TX_FIFO_EMPTY) && 1343c5aff182SThomas Petazzoni (val & MVNETA_TX_IN_PRGRS)); 1344c5aff182SThomas Petazzoni 1345c5aff182SThomas Petazzoni udelay(200); 1346c5aff182SThomas Petazzoni } 1347c5aff182SThomas Petazzoni 1348c5aff182SThomas Petazzoni /* Enable the port by setting the port enable bit of the MAC control register */ 1349c5aff182SThomas Petazzoni static void mvneta_port_enable(struct mvneta_port *pp) 1350c5aff182SThomas Petazzoni { 1351c5aff182SThomas Petazzoni u32 val; 1352c5aff182SThomas Petazzoni 1353c5aff182SThomas Petazzoni /* Enable port */ 1354c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1355c5aff182SThomas Petazzoni val |= MVNETA_GMAC0_PORT_ENABLE; 1356c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1357c5aff182SThomas Petazzoni } 1358c5aff182SThomas Petazzoni 1359c5aff182SThomas Petazzoni /* Disable the port and wait for about 200 usec before retuning */ 1360c5aff182SThomas Petazzoni static void mvneta_port_disable(struct mvneta_port *pp) 1361c5aff182SThomas Petazzoni { 1362c5aff182SThomas Petazzoni u32 val; 1363c5aff182SThomas Petazzoni 1364c5aff182SThomas Petazzoni /* Reset the Enable bit in the Serial Control Register */ 1365c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1366c5aff182SThomas Petazzoni val &= ~MVNETA_GMAC0_PORT_ENABLE; 1367c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1368c5aff182SThomas Petazzoni 1369c5aff182SThomas Petazzoni udelay(200); 1370c5aff182SThomas Petazzoni } 1371c5aff182SThomas Petazzoni 1372c5aff182SThomas Petazzoni /* Multicast tables methods */ 1373c5aff182SThomas Petazzoni 1374c5aff182SThomas Petazzoni /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ 1375c5aff182SThomas Petazzoni static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) 1376c5aff182SThomas Petazzoni { 1377c5aff182SThomas Petazzoni int offset; 1378c5aff182SThomas Petazzoni u32 val; 1379c5aff182SThomas Petazzoni 1380c5aff182SThomas Petazzoni if (queue == -1) { 1381c5aff182SThomas Petazzoni val = 0; 1382c5aff182SThomas Petazzoni } else { 1383c5aff182SThomas Petazzoni val = 0x1 | (queue << 1); 
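/* Each 32-bit filter register packs four one-byte table entries: bit 0
 * of an entry byte means "pass" and bits [3:1] select the RX queue.
 * For example, queue 2 yields the byte 0x05, and the shifts below
 * replicate it into all four byte lanes, so the register is written
 * with 0x05050505.
 */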
1384c5aff182SThomas Petazzoni val |= (val << 24) | (val << 16) | (val << 8); 1385c5aff182SThomas Petazzoni } 1386c5aff182SThomas Petazzoni 1387c5aff182SThomas Petazzoni for (offset = 0; offset <= 0xc; offset += 4) 1388c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); 1389c5aff182SThomas Petazzoni } 1390c5aff182SThomas Petazzoni 1391c5aff182SThomas Petazzoni /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ 1392c5aff182SThomas Petazzoni static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) 1393c5aff182SThomas Petazzoni { 1394c5aff182SThomas Petazzoni int offset; 1395c5aff182SThomas Petazzoni u32 val; 1396c5aff182SThomas Petazzoni 1397c5aff182SThomas Petazzoni if (queue == -1) { 1398c5aff182SThomas Petazzoni val = 0; 1399c5aff182SThomas Petazzoni } else { 1400c5aff182SThomas Petazzoni val = 0x1 | (queue << 1); 1401c5aff182SThomas Petazzoni val |= (val << 24) | (val << 16) | (val << 8); 1402c5aff182SThomas Petazzoni } 1403c5aff182SThomas Petazzoni 1404c5aff182SThomas Petazzoni for (offset = 0; offset <= 0xfc; offset += 4) 1405c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); 1406c5aff182SThomas Petazzoni 1407c5aff182SThomas Petazzoni } 1408c5aff182SThomas Petazzoni 1409c5aff182SThomas Petazzoni /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ 1410c5aff182SThomas Petazzoni static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) 1411c5aff182SThomas Petazzoni { 1412c5aff182SThomas Petazzoni int offset; 1413c5aff182SThomas Petazzoni u32 val; 1414c5aff182SThomas Petazzoni 1415c5aff182SThomas Petazzoni if (queue == -1) { 1416c5aff182SThomas Petazzoni memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); 1417c5aff182SThomas Petazzoni val = 0; 1418c5aff182SThomas Petazzoni } else { 1419c5aff182SThomas Petazzoni memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); 1420c5aff182SThomas Petazzoni val = 0x1 | (queue << 1); 1421c5aff182SThomas Petazzoni val |= (val << 24) | (val << 16) | (val << 8); 1422c5aff182SThomas Petazzoni } 1423c5aff182SThomas Petazzoni 1424c5aff182SThomas Petazzoni for (offset = 0; offset <= 0xfc; offset += 4) 1425c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); 1426c5aff182SThomas Petazzoni } 1427c5aff182SThomas Petazzoni 1428db488c10SGregory CLEMENT static void mvneta_percpu_unmask_interrupt(void *arg) 1429db488c10SGregory CLEMENT { 1430db488c10SGregory CLEMENT struct mvneta_port *pp = arg; 1431db488c10SGregory CLEMENT 1432db488c10SGregory CLEMENT /* All the queue are unmasked, but actually only the ones 1433db488c10SGregory CLEMENT * mapped to this CPU will be unmasked 1434db488c10SGregory CLEMENT */ 1435db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_NEW_MASK, 1436db488c10SGregory CLEMENT MVNETA_RX_INTR_MASK_ALL | 1437db488c10SGregory CLEMENT MVNETA_TX_INTR_MASK_ALL | 1438db488c10SGregory CLEMENT MVNETA_MISCINTR_INTR_MASK); 1439db488c10SGregory CLEMENT } 1440db488c10SGregory CLEMENT 1441db488c10SGregory CLEMENT static void mvneta_percpu_mask_interrupt(void *arg) 1442db488c10SGregory CLEMENT { 1443db488c10SGregory CLEMENT struct mvneta_port *pp = arg; 1444db488c10SGregory CLEMENT 1445db488c10SGregory CLEMENT /* All the queue are masked, but actually only the ones 1446db488c10SGregory CLEMENT * mapped to this CPU will be masked 1447db488c10SGregory CLEMENT */ 1448db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1449db488c10SGregory CLEMENT mvreg_write(pp, 
MVNETA_INTR_OLD_MASK, 0); 1450db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 1451db488c10SGregory CLEMENT } 1452db488c10SGregory CLEMENT 1453db488c10SGregory CLEMENT static void mvneta_percpu_clear_intr_cause(void *arg) 1454db488c10SGregory CLEMENT { 1455db488c10SGregory CLEMENT struct mvneta_port *pp = arg; 1456db488c10SGregory CLEMENT 1457db488c10SGregory CLEMENT /* All the queue are cleared, but actually only the ones 1458db488c10SGregory CLEMENT * mapped to this CPU will be cleared 1459db488c10SGregory CLEMENT */ 1460db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1461db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 1462db488c10SGregory CLEMENT mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 1463db488c10SGregory CLEMENT } 1464db488c10SGregory CLEMENT 1465c5aff182SThomas Petazzoni /* This method sets defaults to the NETA port: 1466c5aff182SThomas Petazzoni * Clears interrupt Cause and Mask registers. 1467c5aff182SThomas Petazzoni * Clears all MAC tables. 1468c5aff182SThomas Petazzoni * Sets defaults to all registers. 1469c5aff182SThomas Petazzoni * Resets RX and TX descriptor rings. 1470c5aff182SThomas Petazzoni * Resets PHY. 1471c5aff182SThomas Petazzoni * This method can be called after mvneta_port_down() to return the port 1472c5aff182SThomas Petazzoni * settings to defaults. 1473c5aff182SThomas Petazzoni */ 1474c5aff182SThomas Petazzoni static void mvneta_defaults_set(struct mvneta_port *pp) 1475c5aff182SThomas Petazzoni { 1476c5aff182SThomas Petazzoni int cpu; 1477c5aff182SThomas Petazzoni int queue; 1478c5aff182SThomas Petazzoni u32 val; 14792dcf75e2SGregory CLEMENT int max_cpu = num_present_cpus(); 1480c5aff182SThomas Petazzoni 1481c5aff182SThomas Petazzoni /* Clear all Cause registers */ 1482db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 1483c5aff182SThomas Petazzoni 1484c5aff182SThomas Petazzoni /* Mask all interrupts */ 1485db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 1486c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1487c5aff182SThomas Petazzoni 1488c5aff182SThomas Petazzoni /* Enable MBUS Retry bit16 */ 1489c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); 1490c5aff182SThomas Petazzoni 149150bf8cb6SGregory CLEMENT /* Set CPU queue access map. CPUs are assigned to the RX and 149250bf8cb6SGregory CLEMENT * TX queues modulo their number. If there is only one TX 149350bf8cb6SGregory CLEMENT * queue then it is assigned to the CPU associated to the 149450bf8cb6SGregory CLEMENT * default RX queue. 
14956a20c175SThomas Petazzoni */ 14962dcf75e2SGregory CLEMENT for_each_present_cpu(cpu) { 14972dcf75e2SGregory CLEMENT int rxq_map = 0, txq_map = 0; 149850bf8cb6SGregory CLEMENT int rxq, txq; 14992636ac3cSMarcin Wojtas if (!pp->neta_armada3700) { 15002dcf75e2SGregory CLEMENT for (rxq = 0; rxq < rxq_number; rxq++) 15012dcf75e2SGregory CLEMENT if ((rxq % max_cpu) == cpu) 15022dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 15032dcf75e2SGregory CLEMENT 150450bf8cb6SGregory CLEMENT for (txq = 0; txq < txq_number; txq++) 150550bf8cb6SGregory CLEMENT if ((txq % max_cpu) == cpu) 150650bf8cb6SGregory CLEMENT txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); 150750bf8cb6SGregory CLEMENT 150850bf8cb6SGregory CLEMENT /* With only one TX queue we configure a special case 150950bf8cb6SGregory CLEMENT * which will allow to get all the irq on a single 151050bf8cb6SGregory CLEMENT * CPU 151150bf8cb6SGregory CLEMENT */ 151250bf8cb6SGregory CLEMENT if (txq_number == 1) 151350bf8cb6SGregory CLEMENT txq_map = (cpu == pp->rxq_def) ? 1514*21327f81SKlaus Kudielka MVNETA_CPU_TXQ_ACCESS(0) : 0; 15152dcf75e2SGregory CLEMENT 15162636ac3cSMarcin Wojtas } else { 15172636ac3cSMarcin Wojtas txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 15182636ac3cSMarcin Wojtas rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK; 15192636ac3cSMarcin Wojtas } 15202636ac3cSMarcin Wojtas 15212dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 15222dcf75e2SGregory CLEMENT } 1523c5aff182SThomas Petazzoni 1524c5aff182SThomas Petazzoni /* Reset RX and TX DMAs */ 1525c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 1526c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 1527c5aff182SThomas Petazzoni 1528c5aff182SThomas Petazzoni /* Disable Legacy WRR, Disable EJP, Release from reset */ 1529c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); 1530c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 1531c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); 1532c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); 1533c5aff182SThomas Petazzoni } 1534c5aff182SThomas Petazzoni 1535c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 1536c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 1537c5aff182SThomas Petazzoni 1538c5aff182SThomas Petazzoni /* Set Port Acceleration Mode */ 1539dc35a10fSMarcin Wojtas if (pp->bm_priv) 1540dc35a10fSMarcin Wojtas /* HW buffer management + legacy parser */ 1541dc35a10fSMarcin Wojtas val = MVNETA_ACC_MODE_EXT2; 1542dc35a10fSMarcin Wojtas else 1543dc35a10fSMarcin Wojtas /* SW buffer management + legacy parser */ 1544dc35a10fSMarcin Wojtas val = MVNETA_ACC_MODE_EXT1; 1545c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_ACC_MODE, val); 1546c5aff182SThomas Petazzoni 1547dc35a10fSMarcin Wojtas if (pp->bm_priv) 1548dc35a10fSMarcin Wojtas mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); 1549dc35a10fSMarcin Wojtas 1550c5aff182SThomas Petazzoni /* Update val of portCfg register accordingly with all RxQueue types */ 155190b74c01SGregory CLEMENT val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 1552c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG, val); 1553c5aff182SThomas Petazzoni 1554c5aff182SThomas Petazzoni val = 0; 1555c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); 1556c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); 
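/* The SDMA configuration assembled below selects a burst size of 16 for
 * both RX and TX and leaves packet data unswapped; on big-endian kernels
 * only the descriptors are byte-swapped (MVNETA_DESC_SWAP).
 */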
1557c5aff182SThomas Petazzoni
1558c5aff182SThomas Petazzoni /* Build PORT_SDMA_CONFIG_REG */
1559c5aff182SThomas Petazzoni val = 0;
1560c5aff182SThomas Petazzoni
1561c5aff182SThomas Petazzoni /* Default burst size */
1562c5aff182SThomas Petazzoni val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1563c5aff182SThomas Petazzoni val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
15649ad8fef6SThomas Petazzoni val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1565c5aff182SThomas Petazzoni
15669ad8fef6SThomas Petazzoni #if defined(__BIG_ENDIAN)
15679ad8fef6SThomas Petazzoni val |= MVNETA_DESC_SWAP;
15689ad8fef6SThomas Petazzoni #endif
1569c5aff182SThomas Petazzoni
1570c5aff182SThomas Petazzoni /* Assign port SDMA configuration */
1571c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1572c5aff182SThomas Petazzoni
157371408602SThomas Petazzoni /* Disable PHY polling in hardware, since we're using the
157471408602SThomas Petazzoni * kernel phylib to do this.
157571408602SThomas Petazzoni */
157671408602SThomas Petazzoni val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
157771408602SThomas Petazzoni val &= ~MVNETA_PHY_POLLING_ENABLE;
157871408602SThomas Petazzoni mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
157971408602SThomas Petazzoni
1580c5aff182SThomas Petazzoni mvneta_set_ucast_table(pp, -1);
1581c5aff182SThomas Petazzoni mvneta_set_special_mcast_table(pp, -1);
1582c5aff182SThomas Petazzoni mvneta_set_other_mcast_table(pp, -1);
1583c5aff182SThomas Petazzoni
1584c5aff182SThomas Petazzoni /* Set port interrupt enable register - default enable all */
1585c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_INTR_ENABLE,
1586c5aff182SThomas Petazzoni (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1587c5aff182SThomas Petazzoni | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1588e483911fSAndrew Lunn
1589e483911fSAndrew Lunn mvneta_mib_counters_clear(pp);
1590c5aff182SThomas Petazzoni }
1591c5aff182SThomas Petazzoni
1592c5aff182SThomas Petazzoni /* Set max sizes for tx queues */
1593c5aff182SThomas Petazzoni static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1594c5aff182SThomas Petazzoni
1595c5aff182SThomas Petazzoni {
1596c5aff182SThomas Petazzoni u32 val, size, mtu;
1597c5aff182SThomas Petazzoni int queue;
1598c5aff182SThomas Petazzoni
1599c5aff182SThomas Petazzoni mtu = max_tx_size * 8;
1600c5aff182SThomas Petazzoni if (mtu > MVNETA_TX_MTU_MAX)
1601c5aff182SThomas Petazzoni mtu = MVNETA_TX_MTU_MAX;
1602c5aff182SThomas Petazzoni
1603c5aff182SThomas Petazzoni /* Set MTU */
1604c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TX_MTU);
1605c5aff182SThomas Petazzoni val &= ~MVNETA_TX_MTU_MAX;
1606c5aff182SThomas Petazzoni val |= mtu;
1607c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TX_MTU, val);
1608c5aff182SThomas Petazzoni
1609c5aff182SThomas Petazzoni /* TX token size and all TXQs token size must be larger than MTU */
1610c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1611c5aff182SThomas Petazzoni
1612c5aff182SThomas Petazzoni size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1613c5aff182SThomas Petazzoni if (size < mtu) {
1614c5aff182SThomas Petazzoni size = mtu;
1615c5aff182SThomas Petazzoni val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1616c5aff182SThomas Petazzoni val |= size;
1617c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1618c5aff182SThomas Petazzoni }
1619c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) {
1620c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1621c5aff182SThomas
Petazzoni 1622c5aff182SThomas Petazzoni size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; 1623c5aff182SThomas Petazzoni if (size < mtu) { 1624c5aff182SThomas Petazzoni size = mtu; 1625c5aff182SThomas Petazzoni val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; 1626c5aff182SThomas Petazzoni val |= size; 1627c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); 1628c5aff182SThomas Petazzoni } 1629c5aff182SThomas Petazzoni } 1630c5aff182SThomas Petazzoni } 1631c5aff182SThomas Petazzoni 1632c5aff182SThomas Petazzoni /* Set unicast address */ 1633c5aff182SThomas Petazzoni static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, 1634c5aff182SThomas Petazzoni int queue) 1635c5aff182SThomas Petazzoni { 1636c5aff182SThomas Petazzoni unsigned int unicast_reg; 1637c5aff182SThomas Petazzoni unsigned int tbl_offset; 1638c5aff182SThomas Petazzoni unsigned int reg_offset; 1639c5aff182SThomas Petazzoni 1640c5aff182SThomas Petazzoni /* Locate the Unicast table entry */ 1641c5aff182SThomas Petazzoni last_nibble = (0xf & last_nibble); 1642c5aff182SThomas Petazzoni 1643c5aff182SThomas Petazzoni /* offset from unicast tbl base */ 1644c5aff182SThomas Petazzoni tbl_offset = (last_nibble / 4) * 4; 1645c5aff182SThomas Petazzoni 1646c5aff182SThomas Petazzoni /* offset within the above reg */ 1647c5aff182SThomas Petazzoni reg_offset = last_nibble % 4; 1648c5aff182SThomas Petazzoni 1649c5aff182SThomas Petazzoni unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); 1650c5aff182SThomas Petazzoni 1651c5aff182SThomas Petazzoni if (queue == -1) { 1652c5aff182SThomas Petazzoni /* Clear accepts frame bit at specified unicast DA tbl entry */ 1653c5aff182SThomas Petazzoni unicast_reg &= ~(0xff << (8 * reg_offset)); 1654c5aff182SThomas Petazzoni } else { 1655c5aff182SThomas Petazzoni unicast_reg &= ~(0xff << (8 * reg_offset)); 1656c5aff182SThomas Petazzoni unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1657c5aff182SThomas Petazzoni } 1658c5aff182SThomas Petazzoni 1659c5aff182SThomas Petazzoni mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); 1660c5aff182SThomas Petazzoni } 1661c5aff182SThomas Petazzoni 1662c5aff182SThomas Petazzoni /* Set mac address */ 166376660757SJakub Kicinski static void mvneta_mac_addr_set(struct mvneta_port *pp, 166476660757SJakub Kicinski const unsigned char *addr, int queue) 1665c5aff182SThomas Petazzoni { 1666c5aff182SThomas Petazzoni unsigned int mac_h; 1667c5aff182SThomas Petazzoni unsigned int mac_l; 1668c5aff182SThomas Petazzoni 1669c5aff182SThomas Petazzoni if (queue != -1) { 1670c5aff182SThomas Petazzoni mac_l = (addr[4] << 8) | (addr[5]); 1671c5aff182SThomas Petazzoni mac_h = (addr[0] << 24) | (addr[1] << 16) | 1672c5aff182SThomas Petazzoni (addr[2] << 8) | (addr[3] << 0); 1673c5aff182SThomas Petazzoni 1674c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); 1675c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); 1676c5aff182SThomas Petazzoni } 1677c5aff182SThomas Petazzoni 1678c5aff182SThomas Petazzoni /* Accept frames of this address */ 1679c5aff182SThomas Petazzoni mvneta_set_ucast_addr(pp, addr[5], queue); 1680c5aff182SThomas Petazzoni } 1681c5aff182SThomas Petazzoni 16826a20c175SThomas Petazzoni /* Set the number of packets that will be received before RX interrupt 16836a20c175SThomas Petazzoni * will be generated by HW. 
1684c5aff182SThomas Petazzoni */ 1685c5aff182SThomas Petazzoni static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, 1686c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq, u32 value) 1687c5aff182SThomas Petazzoni { 1688c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), 1689c5aff182SThomas Petazzoni value | MVNETA_RXQ_NON_OCCUPIED(0)); 1690c5aff182SThomas Petazzoni } 1691c5aff182SThomas Petazzoni 16926a20c175SThomas Petazzoni /* Set the time delay in usec before RX interrupt will be generated by 16936a20c175SThomas Petazzoni * HW. 1694c5aff182SThomas Petazzoni */ 1695c5aff182SThomas Petazzoni static void mvneta_rx_time_coal_set(struct mvneta_port *pp, 1696c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq, u32 value) 1697c5aff182SThomas Petazzoni { 1698189dd626SThomas Petazzoni u32 val; 1699189dd626SThomas Petazzoni unsigned long clk_rate; 1700189dd626SThomas Petazzoni 1701189dd626SThomas Petazzoni clk_rate = clk_get_rate(pp->clk); 1702189dd626SThomas Petazzoni val = (clk_rate / 1000000) * value; 1703c5aff182SThomas Petazzoni 1704c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); 1705c5aff182SThomas Petazzoni } 1706c5aff182SThomas Petazzoni 1707c5aff182SThomas Petazzoni /* Set threshold for TX_DONE pkts coalescing */ 1708c5aff182SThomas Petazzoni static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, 1709c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq, u32 value) 1710c5aff182SThomas Petazzoni { 1711c5aff182SThomas Petazzoni u32 val; 1712c5aff182SThomas Petazzoni 1713c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); 1714c5aff182SThomas Petazzoni 1715c5aff182SThomas Petazzoni val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; 1716c5aff182SThomas Petazzoni val |= MVNETA_TXQ_SENT_THRESH_MASK(value); 1717c5aff182SThomas Petazzoni 1718c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); 1719c5aff182SThomas Petazzoni } 1720c5aff182SThomas Petazzoni 1721c5aff182SThomas Petazzoni /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ 1722c5aff182SThomas Petazzoni static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, 1723f88bee1cSGregory CLEMENT u32 phys_addr, void *virt_addr, 1724f88bee1cSGregory CLEMENT struct mvneta_rx_queue *rxq) 1725c5aff182SThomas Petazzoni { 1726f88bee1cSGregory CLEMENT int i; 1727f88bee1cSGregory CLEMENT 1728c5aff182SThomas Petazzoni rx_desc->buf_phys_addr = phys_addr; 1729f88bee1cSGregory CLEMENT i = rx_desc - rxq->descs; 1730f88bee1cSGregory CLEMENT rxq->buf_virt_addr[i] = virt_addr; 1731c5aff182SThomas Petazzoni } 1732c5aff182SThomas Petazzoni 1733c5aff182SThomas Petazzoni /* Decrement sent descriptors counter */ 1734c5aff182SThomas Petazzoni static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, 1735c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq, 1736c5aff182SThomas Petazzoni int sent_desc) 1737c5aff182SThomas Petazzoni { 1738c5aff182SThomas Petazzoni u32 val; 1739c5aff182SThomas Petazzoni 1740c5aff182SThomas Petazzoni /* Only 255 TX descriptors can be updated at once */ 1741c5aff182SThomas Petazzoni while (sent_desc > 0xff) { 1742c5aff182SThomas Petazzoni val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; 1743c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1744c5aff182SThomas Petazzoni sent_desc = sent_desc - 0xff; 1745c5aff182SThomas Petazzoni } 1746c5aff182SThomas Petazzoni 1747c5aff182SThomas Petazzoni val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; 1748c5aff182SThomas Petazzoni 
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1749c5aff182SThomas Petazzoni } 1750c5aff182SThomas Petazzoni 1751c5aff182SThomas Petazzoni /* Get number of TX descriptors already sent by HW */ 1752c5aff182SThomas Petazzoni static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, 1753c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1754c5aff182SThomas Petazzoni { 1755c5aff182SThomas Petazzoni u32 val; 1756c5aff182SThomas Petazzoni int sent_desc; 1757c5aff182SThomas Petazzoni 1758c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); 1759c5aff182SThomas Petazzoni sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> 1760c5aff182SThomas Petazzoni MVNETA_TXQ_SENT_DESC_SHIFT; 1761c5aff182SThomas Petazzoni 1762c5aff182SThomas Petazzoni return sent_desc; 1763c5aff182SThomas Petazzoni } 1764c5aff182SThomas Petazzoni 17656a20c175SThomas Petazzoni /* Get number of sent descriptors and decrement counter. 1766c5aff182SThomas Petazzoni * The number of sent descriptors is returned. 1767c5aff182SThomas Petazzoni */ 1768c5aff182SThomas Petazzoni static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, 1769c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1770c5aff182SThomas Petazzoni { 1771c5aff182SThomas Petazzoni int sent_desc; 1772c5aff182SThomas Petazzoni 1773c5aff182SThomas Petazzoni /* Get number of sent descriptors */ 1774c5aff182SThomas Petazzoni sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); 1775c5aff182SThomas Petazzoni 1776c5aff182SThomas Petazzoni /* Decrement sent descriptors counter */ 1777c5aff182SThomas Petazzoni if (sent_desc) 1778c5aff182SThomas Petazzoni mvneta_txq_sent_desc_dec(pp, txq, sent_desc); 1779c5aff182SThomas Petazzoni 1780c5aff182SThomas Petazzoni return sent_desc; 1781c5aff182SThomas Petazzoni } 1782c5aff182SThomas Petazzoni 1783c5aff182SThomas Petazzoni /* Set TXQ descriptors fields relevant for CSUM calculation */ 1784c5aff182SThomas Petazzoni static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, 1785c5aff182SThomas Petazzoni int ip_hdr_len, int l4_proto) 1786c5aff182SThomas Petazzoni { 1787c5aff182SThomas Petazzoni u32 command; 1788c5aff182SThomas Petazzoni 1789c5aff182SThomas Petazzoni /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 17906a20c175SThomas Petazzoni * G_L4_chk, L4_type; required only for checksum 17916a20c175SThomas Petazzoni * calculation 17926a20c175SThomas Petazzoni */ 1793c5aff182SThomas Petazzoni command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1794c5aff182SThomas Petazzoni command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1795c5aff182SThomas Petazzoni 17960a198587SThomas Fitzsimmons if (l3_proto == htons(ETH_P_IP)) 1797c5aff182SThomas Petazzoni command |= MVNETA_TXD_IP_CSUM; 1798c5aff182SThomas Petazzoni else 1799c5aff182SThomas Petazzoni command |= MVNETA_TX_L3_IP6; 1800c5aff182SThomas Petazzoni 1801c5aff182SThomas Petazzoni if (l4_proto == IPPROTO_TCP) 1802c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_CSUM_FULL; 1803c5aff182SThomas Petazzoni else if (l4_proto == IPPROTO_UDP) 1804c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; 1805c5aff182SThomas Petazzoni else 1806c5aff182SThomas Petazzoni command |= MVNETA_TX_L4_CSUM_NOT; 1807c5aff182SThomas Petazzoni 1808c5aff182SThomas Petazzoni return command; 1809c5aff182SThomas Petazzoni } 1810c5aff182SThomas Petazzoni 1811c5aff182SThomas Petazzoni 1812c5aff182SThomas Petazzoni /* Display more error info */ 1813c5aff182SThomas Petazzoni static void mvneta_rx_error(struct mvneta_port *pp, 1814c5aff182SThomas Petazzoni 
struct mvneta_rx_desc *rx_desc)
1815c5aff182SThomas Petazzoni {
1816c35947b8SLorenzo Bianconi struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1817c5aff182SThomas Petazzoni u32 status = rx_desc->status;
1818c5aff182SThomas Petazzoni
1819c35947b8SLorenzo Bianconi /* update per-cpu counter */
1820c35947b8SLorenzo Bianconi u64_stats_update_begin(&stats->syncp);
1821c35947b8SLorenzo Bianconi stats->rx_errors++;
1822c35947b8SLorenzo Bianconi u64_stats_update_end(&stats->syncp);
1823c35947b8SLorenzo Bianconi
1824c5aff182SThomas Petazzoni switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1825c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_CRC:
1826c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1827c5aff182SThomas Petazzoni status, rx_desc->data_size);
1828c5aff182SThomas Petazzoni break;
1829c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_OVERRUN:
1830c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1831c5aff182SThomas Petazzoni status, rx_desc->data_size);
1832c5aff182SThomas Petazzoni break;
1833c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_LEN:
1834c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1835c5aff182SThomas Petazzoni status, rx_desc->data_size);
1836c5aff182SThomas Petazzoni break;
1837c5aff182SThomas Petazzoni case MVNETA_RXD_ERR_RESOURCE:
1838c5aff182SThomas Petazzoni netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1839c5aff182SThomas Petazzoni status, rx_desc->data_size);
1840c5aff182SThomas Petazzoni break;
1841c5aff182SThomas Petazzoni }
1842c5aff182SThomas Petazzoni }
1843c5aff182SThomas Petazzoni
18445428213cSwilly tarreau /* Handle RX checksum offload based on the descriptor's status */
1845aff0824dSLorenzo Bianconi static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1846c5aff182SThomas Petazzoni {
1847f945cec8SYelena Krivosheev if ((pp->dev->features & NETIF_F_RXCSUM) &&
1848f945cec8SYelena Krivosheev (status & MVNETA_RXD_L3_IP4) &&
1849aff0824dSLorenzo Bianconi (status & MVNETA_RXD_L4_CSUM_OK))
1850aff0824dSLorenzo Bianconi return CHECKSUM_UNNECESSARY;
1851c5aff182SThomas Petazzoni
1852aff0824dSLorenzo Bianconi return CHECKSUM_NONE;
1853c5aff182SThomas Petazzoni }
1854c5aff182SThomas Petazzoni
18556c498974Swilly tarreau /* Return tx queue pointer (find last set bit) according to <cause> returned
18566c498974Swilly tarreau * from tx_done reg. <cause> must not be null. The return value is always a
18576c498974Swilly tarreau * valid queue for matching the first one found in <cause>.
18586c498974Swilly tarreau */ 1859c5aff182SThomas Petazzoni static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1860c5aff182SThomas Petazzoni u32 cause) 1861c5aff182SThomas Petazzoni { 1862c5aff182SThomas Petazzoni int queue = fls(cause) - 1; 1863c5aff182SThomas Petazzoni 18646c498974Swilly tarreau return &pp->txqs[queue]; 1865c5aff182SThomas Petazzoni } 1866c5aff182SThomas Petazzoni 1867c5aff182SThomas Petazzoni /* Free tx queue skbuffs */ 1868c5aff182SThomas Petazzoni static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1869a29b6235SMarcin Wojtas struct mvneta_tx_queue *txq, int num, 1870632bb64fSLorenzo Bianconi struct netdev_queue *nq, bool napi) 1871c5aff182SThomas Petazzoni { 1872a29b6235SMarcin Wojtas unsigned int bytes_compl = 0, pkts_compl = 0; 18732f9d0939SLorenzo Bianconi struct xdp_frame_bulk bq; 1874c5aff182SThomas Petazzoni int i; 1875c5aff182SThomas Petazzoni 18762f9d0939SLorenzo Bianconi xdp_frame_bulk_init(&bq); 18772f9d0939SLorenzo Bianconi 18782f9d0939SLorenzo Bianconi rcu_read_lock(); /* need for xdp_return_frame_bulk */ 18792f9d0939SLorenzo Bianconi 1880c5aff182SThomas Petazzoni for (i = 0; i < num; i++) { 18819e58c8b4SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; 1882c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc = txq->descs + 1883c5aff182SThomas Petazzoni txq->txq_get_index; 1884a29b6235SMarcin Wojtas 1885c5aff182SThomas Petazzoni mvneta_txq_inc_get(txq); 1886c5aff182SThomas Petazzoni 1887f00ba4f4SRussell King (Oracle) if (buf->type == MVNETA_TYPE_XDP_NDO || 1888f00ba4f4SRussell King (Oracle) buf->type == MVNETA_TYPE_SKB) 18892e3173a3SEzequiel Garcia dma_unmap_single(pp->dev->dev.parent, 18902e3173a3SEzequiel Garcia tx_desc->buf_phys_addr, 1891c5aff182SThomas Petazzoni tx_desc->data_size, DMA_TO_DEVICE); 1892b0bd1b07SRussell King (Oracle) if ((buf->type == MVNETA_TYPE_TSO || 1893b0bd1b07SRussell King (Oracle) buf->type == MVNETA_TYPE_SKB) && buf->skb) { 18949e58c8b4SLorenzo Bianconi bytes_compl += buf->skb->len; 18959e58c8b4SLorenzo Bianconi pkts_compl++; 18969e58c8b4SLorenzo Bianconi dev_kfree_skb_any(buf->skb); 1897c41ced02SLorenzo Bianconi } else if ((buf->type == MVNETA_TYPE_XDP_TX || 1898c41ced02SLorenzo Bianconi buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { 1899632bb64fSLorenzo Bianconi if (napi && buf->type == MVNETA_TYPE_XDP_TX) 1900632bb64fSLorenzo Bianconi xdp_return_frame_rx_napi(buf->xdpf); 1901632bb64fSLorenzo Bianconi else 19022f9d0939SLorenzo Bianconi xdp_return_frame_bulk(buf->xdpf, &bq); 1903b0a43db9SLorenzo Bianconi } 1904c5aff182SThomas Petazzoni } 19052f9d0939SLorenzo Bianconi xdp_flush_frame_bulk(&bq); 19062f9d0939SLorenzo Bianconi 19072f9d0939SLorenzo Bianconi rcu_read_unlock(); 1908a29b6235SMarcin Wojtas 1909a29b6235SMarcin Wojtas netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); 1910c5aff182SThomas Petazzoni } 1911c5aff182SThomas Petazzoni 1912c5aff182SThomas Petazzoni /* Handle end of transmission */ 1913cd713199SArnaud Ebalard static void mvneta_txq_done(struct mvneta_port *pp, 1914c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 1915c5aff182SThomas Petazzoni { 1916c5aff182SThomas Petazzoni struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1917c5aff182SThomas Petazzoni int tx_done; 1918c5aff182SThomas Petazzoni 1919c5aff182SThomas Petazzoni tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1920cd713199SArnaud Ebalard if (!tx_done) 1921cd713199SArnaud Ebalard return; 1922cd713199SArnaud Ebalard 1923632bb64fSLorenzo Bianconi 
mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); 1924c5aff182SThomas Petazzoni 1925c5aff182SThomas Petazzoni txq->count -= tx_done; 1926c5aff182SThomas Petazzoni 1927c5aff182SThomas Petazzoni if (netif_tx_queue_stopped(nq)) { 19288eef5f97SEzequiel Garcia if (txq->count <= txq->tx_wake_threshold) 1929c5aff182SThomas Petazzoni netif_tx_wake_queue(nq); 1930c5aff182SThomas Petazzoni } 1931c5aff182SThomas Petazzoni } 1932c5aff182SThomas Petazzoni 1933dc35a10fSMarcin Wojtas /* Refill processing for SW buffer management */ 19347e47fd84SGregory CLEMENT /* Allocate page per descriptor */ 1935c5aff182SThomas Petazzoni static int mvneta_rx_refill(struct mvneta_port *pp, 1936f88bee1cSGregory CLEMENT struct mvneta_rx_desc *rx_desc, 19377e47fd84SGregory CLEMENT struct mvneta_rx_queue *rxq, 19387e47fd84SGregory CLEMENT gfp_t gfp_mask) 1939c5aff182SThomas Petazzoni { 1940c5aff182SThomas Petazzoni dma_addr_t phys_addr; 19417e47fd84SGregory CLEMENT struct page *page; 1942c5aff182SThomas Petazzoni 1943568a3fa2SLorenzo Bianconi page = page_pool_alloc_pages(rxq->page_pool, 1944568a3fa2SLorenzo Bianconi gfp_mask | __GFP_NOWARN); 19457e47fd84SGregory CLEMENT if (!page) 1946c5aff182SThomas Petazzoni return -ENOMEM; 1947c5aff182SThomas Petazzoni 1948568a3fa2SLorenzo Bianconi phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; 19497e47fd84SGregory CLEMENT mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); 1950568a3fa2SLorenzo Bianconi 1951c5aff182SThomas Petazzoni return 0; 1952c5aff182SThomas Petazzoni } 1953c5aff182SThomas Petazzoni 1954c5aff182SThomas Petazzoni /* Handle tx checksum */ 195520d446f2SYuval Shaia static u32 mvneta_skb_tx_csum(struct sk_buff *skb) 1956c5aff182SThomas Petazzoni { 1957c5aff182SThomas Petazzoni if (skb->ip_summed == CHECKSUM_PARTIAL) { 1958c5aff182SThomas Petazzoni int ip_hdr_len = 0; 1959817dbfa5SVlad Yasevich __be16 l3_proto = vlan_get_protocol(skb); 1960c5aff182SThomas Petazzoni u8 l4_proto; 1961c5aff182SThomas Petazzoni 1962817dbfa5SVlad Yasevich if (l3_proto == htons(ETH_P_IP)) { 1963c5aff182SThomas Petazzoni struct iphdr *ip4h = ip_hdr(skb); 1964c5aff182SThomas Petazzoni 1965c5aff182SThomas Petazzoni /* Calculate IPv4 checksum and L4 checksum */ 1966c5aff182SThomas Petazzoni ip_hdr_len = ip4h->ihl; 1967c5aff182SThomas Petazzoni l4_proto = ip4h->protocol; 1968817dbfa5SVlad Yasevich } else if (l3_proto == htons(ETH_P_IPV6)) { 1969c5aff182SThomas Petazzoni struct ipv6hdr *ip6h = ipv6_hdr(skb); 1970c5aff182SThomas Petazzoni 1971c5aff182SThomas Petazzoni /* Read l4_protocol from one of IPv6 extra headers */ 1972c5aff182SThomas Petazzoni if (skb_network_header_len(skb) > 0) 1973c5aff182SThomas Petazzoni ip_hdr_len = (skb_network_header_len(skb) >> 2); 1974c5aff182SThomas Petazzoni l4_proto = ip6h->nexthdr; 1975c5aff182SThomas Petazzoni } else 1976c5aff182SThomas Petazzoni return MVNETA_TX_L4_CSUM_NOT; 1977c5aff182SThomas Petazzoni 1978c5aff182SThomas Petazzoni return mvneta_txq_desc_csum(skb_network_offset(skb), 1979817dbfa5SVlad Yasevich l3_proto, ip_hdr_len, l4_proto); 1980c5aff182SThomas Petazzoni } 1981c5aff182SThomas Petazzoni 1982c5aff182SThomas Petazzoni return MVNETA_TX_L4_CSUM_NOT; 1983c5aff182SThomas Petazzoni } 1984c5aff182SThomas Petazzoni 1985c5aff182SThomas Petazzoni /* Drop packets received by the RXQ and free buffers */ 1986c5aff182SThomas Petazzoni static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1987c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 1988c5aff182SThomas Petazzoni { 1989c5aff182SThomas Petazzoni int rx_done, i; 
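/* Two cleanup paths follow: with hardware buffer management (pp->bm_priv
 * set) each dropped descriptor's buffer is handed back to its BM pool;
 * with software buffer management every page is released to the
 * page_pool, which is then unregistered from XDP and destroyed.
 */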
1990c5aff182SThomas Petazzoni 1991c5aff182SThomas Petazzoni rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1992dc35a10fSMarcin Wojtas if (rx_done) 1993dc35a10fSMarcin Wojtas mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1994dc35a10fSMarcin Wojtas 1995dc35a10fSMarcin Wojtas if (pp->bm_priv) { 1996dc35a10fSMarcin Wojtas for (i = 0; i < rx_done; i++) { 1997dc35a10fSMarcin Wojtas struct mvneta_rx_desc *rx_desc = 1998dc35a10fSMarcin Wojtas mvneta_rxq_next_desc_get(rxq); 1999dc35a10fSMarcin Wojtas u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2000dc35a10fSMarcin Wojtas struct mvneta_bm_pool *bm_pool; 2001dc35a10fSMarcin Wojtas 2002dc35a10fSMarcin Wojtas bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2003dc35a10fSMarcin Wojtas /* Return dropped buffer to the pool */ 2004dc35a10fSMarcin Wojtas mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2005dc35a10fSMarcin Wojtas rx_desc->buf_phys_addr); 2006dc35a10fSMarcin Wojtas } 2007dc35a10fSMarcin Wojtas return; 2008dc35a10fSMarcin Wojtas } 2009dc35a10fSMarcin Wojtas 2010c5aff182SThomas Petazzoni for (i = 0; i < rxq->size; i++) { 2011c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc = rxq->descs + i; 2012f88bee1cSGregory CLEMENT void *data = rxq->buf_virt_addr[i]; 2013562e2f46SYelena Krivosheev if (!data || !(rx_desc->buf_phys_addr)) 2014562e2f46SYelena Krivosheev continue; 2015c5aff182SThomas Petazzoni 2016458de8a9SIlias Apalodimas page_pool_put_full_page(rxq->page_pool, data, false); 2017dc35a10fSMarcin Wojtas } 2018568a3fa2SLorenzo Bianconi if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 2019568a3fa2SLorenzo Bianconi xdp_rxq_info_unreg(&rxq->xdp_rxq); 2020568a3fa2SLorenzo Bianconi page_pool_destroy(rxq->page_pool); 2021568a3fa2SLorenzo Bianconi rxq->page_pool = NULL; 2022c5aff182SThomas Petazzoni } 2023c5aff182SThomas Petazzoni 2024ff519e2aSLorenzo Bianconi static void 2025320d5441SLorenzo Bianconi mvneta_update_stats(struct mvneta_port *pp, 2026320d5441SLorenzo Bianconi struct mvneta_stats *ps) 2027ff519e2aSLorenzo Bianconi { 2028ff519e2aSLorenzo Bianconi struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2029ff519e2aSLorenzo Bianconi 2030ff519e2aSLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 2031320d5441SLorenzo Bianconi stats->es.ps.rx_packets += ps->rx_packets; 2032320d5441SLorenzo Bianconi stats->es.ps.rx_bytes += ps->rx_bytes; 20333d866523SLorenzo Bianconi /* xdp */ 20343d866523SLorenzo Bianconi stats->es.ps.xdp_redirect += ps->xdp_redirect; 20353d866523SLorenzo Bianconi stats->es.ps.xdp_pass += ps->xdp_pass; 20363d866523SLorenzo Bianconi stats->es.ps.xdp_drop += ps->xdp_drop; 2037ff519e2aSLorenzo Bianconi u64_stats_update_end(&stats->syncp); 2038ff519e2aSLorenzo Bianconi } 2039ff519e2aSLorenzo Bianconi 2040562e2f46SYelena Krivosheev static inline 2041562e2f46SYelena Krivosheev int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) 2042562e2f46SYelena Krivosheev { 2043562e2f46SYelena Krivosheev struct mvneta_rx_desc *rx_desc; 2044562e2f46SYelena Krivosheev int curr_desc = rxq->first_to_refill; 2045562e2f46SYelena Krivosheev int i; 2046562e2f46SYelena Krivosheev 2047562e2f46SYelena Krivosheev for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { 2048562e2f46SYelena Krivosheev rx_desc = rxq->descs + curr_desc; 2049562e2f46SYelena Krivosheev if (!(rx_desc->buf_phys_addr)) { 2050562e2f46SYelena Krivosheev if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { 20519ac41f3cSLorenzo Bianconi struct mvneta_pcpu_stats *stats; 20529ac41f3cSLorenzo Bianconi 2053562e2f46SYelena Krivosheev pr_err("Can't refill 
queue %d. Done %d from %d\n", 2054562e2f46SYelena Krivosheev rxq->id, i, rxq->refill_num); 20559ac41f3cSLorenzo Bianconi 20569ac41f3cSLorenzo Bianconi stats = this_cpu_ptr(pp->stats); 20579ac41f3cSLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 20589ac41f3cSLorenzo Bianconi stats->es.refill_error++; 20599ac41f3cSLorenzo Bianconi u64_stats_update_end(&stats->syncp); 2060562e2f46SYelena Krivosheev break; 2061562e2f46SYelena Krivosheev } 2062562e2f46SYelena Krivosheev } 2063562e2f46SYelena Krivosheev curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); 2064562e2f46SYelena Krivosheev } 2065562e2f46SYelena Krivosheev rxq->refill_num -= i; 2066562e2f46SYelena Krivosheev rxq->first_to_refill = curr_desc; 2067562e2f46SYelena Krivosheev 2068562e2f46SYelena Krivosheev return i; 2069562e2f46SYelena Krivosheev } 2070562e2f46SYelena Krivosheev 2071ca0e0146SLorenzo Bianconi static void 2072ca0e0146SLorenzo Bianconi mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2073d094c985SLorenzo Bianconi struct xdp_buff *xdp, int sync_len) 2074ca0e0146SLorenzo Bianconi { 2075d094c985SLorenzo Bianconi struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2076ca0e0146SLorenzo Bianconi int i; 2077ca0e0146SLorenzo Bianconi 207876a67694SLorenzo Bianconi if (likely(!xdp_buff_has_frags(xdp))) 207976a67694SLorenzo Bianconi goto out; 208076a67694SLorenzo Bianconi 2081ca0e0146SLorenzo Bianconi for (i = 0; i < sinfo->nr_frags; i++) 2082ca0e0146SLorenzo Bianconi page_pool_put_full_page(rxq->page_pool, 2083eb33f118SLorenzo Bianconi skb_frag_page(&sinfo->frags[i]), true); 208476a67694SLorenzo Bianconi 208576a67694SLorenzo Bianconi out: 20869d3b2d3eSLorenzo Bianconi page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), 2087eb33f118SLorenzo Bianconi sync_len, true); 2088ca0e0146SLorenzo Bianconi } 2089ca0e0146SLorenzo Bianconi 20908dc9a088SLorenzo Bianconi static int 2091b0a43db9SLorenzo Bianconi mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2092c41ced02SLorenzo Bianconi struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map) 2093b0a43db9SLorenzo Bianconi { 2094c41ced02SLorenzo Bianconi struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); 2095c41ced02SLorenzo Bianconi struct device *dev = pp->dev->dev.parent; 2096c52db246SLorenzo Bianconi struct mvneta_tx_desc *tx_desc; 2097c41ced02SLorenzo Bianconi int i, num_frames = 1; 2098c41ced02SLorenzo Bianconi struct page *page; 2099c41ced02SLorenzo Bianconi 2100c41ced02SLorenzo Bianconi if (unlikely(xdp_frame_has_frags(xdpf))) 2101c41ced02SLorenzo Bianconi num_frames += sinfo->nr_frags; 2102c41ced02SLorenzo Bianconi 2103c41ced02SLorenzo Bianconi if (txq->count + num_frames >= txq->size) 2104c41ced02SLorenzo Bianconi return MVNETA_XDP_DROPPED; 2105c41ced02SLorenzo Bianconi 2106c41ced02SLorenzo Bianconi for (i = 0; i < num_frames; i++) { 2107c41ced02SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2108c41ced02SLorenzo Bianconi skb_frag_t *frag = NULL; 2109c41ced02SLorenzo Bianconi int len = xdpf->len; 2110b0a43db9SLorenzo Bianconi dma_addr_t dma_addr; 2111b0a43db9SLorenzo Bianconi 2112c41ced02SLorenzo Bianconi if (unlikely(i)) { /* paged area */ 2113c41ced02SLorenzo Bianconi frag = &sinfo->frags[i - 1]; 2114c41ced02SLorenzo Bianconi len = skb_frag_size(frag); 2115c41ced02SLorenzo Bianconi } 2116b0a43db9SLorenzo Bianconi 2117b0a43db9SLorenzo Bianconi tx_desc = mvneta_txq_next_desc_get(txq); 2118b0a43db9SLorenzo Bianconi if (dma_map) { 2119b0a43db9SLorenzo 
Bianconi /* ndo_xdp_xmit */ 2120c41ced02SLorenzo Bianconi void *data; 2121c41ced02SLorenzo Bianconi 2122c41ced02SLorenzo Bianconi data = unlikely(frag) ? skb_frag_address(frag) 2123c41ced02SLorenzo Bianconi : xdpf->data; 2124c41ced02SLorenzo Bianconi dma_addr = dma_map_single(dev, data, len, 2125c41ced02SLorenzo Bianconi DMA_TO_DEVICE); 2126c41ced02SLorenzo Bianconi if (dma_mapping_error(dev, dma_addr)) { 2127b0a43db9SLorenzo Bianconi mvneta_txq_desc_put(txq); 2128c41ced02SLorenzo Bianconi goto unmap; 2129b0a43db9SLorenzo Bianconi } 2130c41ced02SLorenzo Bianconi 2131b0a43db9SLorenzo Bianconi buf->type = MVNETA_TYPE_XDP_NDO; 2132b0a43db9SLorenzo Bianconi } else { 2133c41ced02SLorenzo Bianconi page = unlikely(frag) ? skb_frag_page(frag) 2134c41ced02SLorenzo Bianconi : virt_to_page(xdpf->data); 2135c41ced02SLorenzo Bianconi dma_addr = page_pool_get_dma_addr(page); 2136c41ced02SLorenzo Bianconi if (unlikely(frag)) 2137c41ced02SLorenzo Bianconi dma_addr += skb_frag_off(frag); 2138c41ced02SLorenzo Bianconi else 2139c41ced02SLorenzo Bianconi dma_addr += sizeof(*xdpf) + xdpf->headroom; 2140c41ced02SLorenzo Bianconi dma_sync_single_for_device(dev, dma_addr, len, 2141c41ced02SLorenzo Bianconi DMA_BIDIRECTIONAL); 2142b0a43db9SLorenzo Bianconi buf->type = MVNETA_TYPE_XDP_TX; 2143b0a43db9SLorenzo Bianconi } 2144c41ced02SLorenzo Bianconi buf->xdpf = unlikely(i) ? NULL : xdpf; 2145b0a43db9SLorenzo Bianconi 2146c41ced02SLorenzo Bianconi tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC; 2147b0a43db9SLorenzo Bianconi tx_desc->buf_phys_addr = dma_addr; 2148c41ced02SLorenzo Bianconi tx_desc->data_size = len; 2149c41ced02SLorenzo Bianconi *nxmit_byte += len; 2150b0a43db9SLorenzo Bianconi 2151b0a43db9SLorenzo Bianconi mvneta_txq_inc_put(txq); 2152c41ced02SLorenzo Bianconi } 2153c41ced02SLorenzo Bianconi /*last descriptor */ 2154c41ced02SLorenzo Bianconi tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2155c41ced02SLorenzo Bianconi 2156c41ced02SLorenzo Bianconi txq->pending += num_frames; 2157c41ced02SLorenzo Bianconi txq->count += num_frames; 2158b0a43db9SLorenzo Bianconi 2159b0a43db9SLorenzo Bianconi return MVNETA_XDP_TX; 2160c41ced02SLorenzo Bianconi 2161c41ced02SLorenzo Bianconi unmap: 2162c41ced02SLorenzo Bianconi for (i--; i >= 0; i--) { 2163c41ced02SLorenzo Bianconi mvneta_txq_desc_put(txq); 2164c41ced02SLorenzo Bianconi tx_desc = txq->descs + txq->next_desc_to_proc; 2165c41ced02SLorenzo Bianconi dma_unmap_single(dev, tx_desc->buf_phys_addr, 2166c41ced02SLorenzo Bianconi tx_desc->data_size, 2167c41ced02SLorenzo Bianconi DMA_TO_DEVICE); 2168c41ced02SLorenzo Bianconi } 2169c41ced02SLorenzo Bianconi 2170c41ced02SLorenzo Bianconi return MVNETA_XDP_DROPPED; 2171b0a43db9SLorenzo Bianconi } 2172b0a43db9SLorenzo Bianconi 2173b0a43db9SLorenzo Bianconi static int 2174b0a43db9SLorenzo Bianconi mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2175b0a43db9SLorenzo Bianconi { 217615070919SJesper Dangaard Brouer struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2177b0a43db9SLorenzo Bianconi struct mvneta_tx_queue *txq; 2178b0a43db9SLorenzo Bianconi struct netdev_queue *nq; 2179c41ced02SLorenzo Bianconi int cpu, nxmit_byte = 0; 2180b0a43db9SLorenzo Bianconi struct xdp_frame *xdpf; 2181b0a43db9SLorenzo Bianconi u32 ret; 2182b0a43db9SLorenzo Bianconi 21831b698fa5SLorenzo Bianconi xdpf = xdp_convert_buff_to_frame(xdp); 2184b0a43db9SLorenzo Bianconi if (unlikely(!xdpf)) 2185b0a43db9SLorenzo Bianconi return MVNETA_XDP_DROPPED; 2186b0a43db9SLorenzo Bianconi 2187b0a43db9SLorenzo 
Bianconi cpu = smp_processor_id(); 2188b0a43db9SLorenzo Bianconi txq = &pp->txqs[cpu % txq_number]; 2189b0a43db9SLorenzo Bianconi nq = netdev_get_tx_queue(pp->dev, txq->id); 2190b0a43db9SLorenzo Bianconi 2191b0a43db9SLorenzo Bianconi __netif_tx_lock(nq, cpu); 2192c41ced02SLorenzo Bianconi ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false); 21937d51a015SLorenzo Bianconi if (ret == MVNETA_XDP_TX) { 21947d51a015SLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 2195c41ced02SLorenzo Bianconi stats->es.ps.tx_bytes += nxmit_byte; 21967d51a015SLorenzo Bianconi stats->es.ps.tx_packets++; 21977d51a015SLorenzo Bianconi stats->es.ps.xdp_tx++; 21987d51a015SLorenzo Bianconi u64_stats_update_end(&stats->syncp); 21997d51a015SLorenzo Bianconi 2200b0a43db9SLorenzo Bianconi mvneta_txq_pend_desc_add(pp, txq, 0); 220115070919SJesper Dangaard Brouer } else { 220215070919SJesper Dangaard Brouer u64_stats_update_begin(&stats->syncp); 220315070919SJesper Dangaard Brouer stats->es.ps.xdp_tx_err++; 220415070919SJesper Dangaard Brouer u64_stats_update_end(&stats->syncp); 22057d51a015SLorenzo Bianconi } 2206b0a43db9SLorenzo Bianconi __netif_tx_unlock(nq); 2207b0a43db9SLorenzo Bianconi 2208b0a43db9SLorenzo Bianconi return ret; 2209b0a43db9SLorenzo Bianconi } 2210b0a43db9SLorenzo Bianconi 2211b0a43db9SLorenzo Bianconi static int 2212b0a43db9SLorenzo Bianconi mvneta_xdp_xmit(struct net_device *dev, int num_frame, 2213b0a43db9SLorenzo Bianconi struct xdp_frame **frames, u32 flags) 2214b0a43db9SLorenzo Bianconi { 2215b0a43db9SLorenzo Bianconi struct mvneta_port *pp = netdev_priv(dev); 22167d51a015SLorenzo Bianconi struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2217fdc13979SLorenzo Bianconi int i, nxmit_byte = 0, nxmit = 0; 2218b0a43db9SLorenzo Bianconi int cpu = smp_processor_id(); 2219b0a43db9SLorenzo Bianconi struct mvneta_tx_queue *txq; 2220b0a43db9SLorenzo Bianconi struct netdev_queue *nq; 2221b0a43db9SLorenzo Bianconi u32 ret; 2222b0a43db9SLorenzo Bianconi 222362a502ccSLorenzo Bianconi if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) 222462a502ccSLorenzo Bianconi return -ENETDOWN; 222562a502ccSLorenzo Bianconi 2226b0a43db9SLorenzo Bianconi if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2227b0a43db9SLorenzo Bianconi return -EINVAL; 2228b0a43db9SLorenzo Bianconi 2229b0a43db9SLorenzo Bianconi txq = &pp->txqs[cpu % txq_number]; 2230b0a43db9SLorenzo Bianconi nq = netdev_get_tx_queue(pp->dev, txq->id); 2231b0a43db9SLorenzo Bianconi 2232b0a43db9SLorenzo Bianconi __netif_tx_lock(nq, cpu); 2233b0a43db9SLorenzo Bianconi for (i = 0; i < num_frame; i++) { 2234c41ced02SLorenzo Bianconi ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte, 2235c41ced02SLorenzo Bianconi true); 2236fdc13979SLorenzo Bianconi if (ret != MVNETA_XDP_TX) 2237fdc13979SLorenzo Bianconi break; 2238fdc13979SLorenzo Bianconi 2239fdc13979SLorenzo Bianconi nxmit++; 2240b0a43db9SLorenzo Bianconi } 2241b0a43db9SLorenzo Bianconi 2242b0a43db9SLorenzo Bianconi if (unlikely(flags & XDP_XMIT_FLUSH)) 2243b0a43db9SLorenzo Bianconi mvneta_txq_pend_desc_add(pp, txq, 0); 2244b0a43db9SLorenzo Bianconi __netif_tx_unlock(nq); 2245b0a43db9SLorenzo Bianconi 22467d51a015SLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 22477d51a015SLorenzo Bianconi stats->es.ps.tx_bytes += nxmit_byte; 22487d51a015SLorenzo Bianconi stats->es.ps.tx_packets += nxmit; 22497d51a015SLorenzo Bianconi stats->es.ps.xdp_xmit += nxmit; 225015070919SJesper Dangaard Brouer stats->es.ps.xdp_xmit_err += num_frame - nxmit; 22517d51a015SLorenzo Bianconi 
u64_stats_update_end(&stats->syncp); 22527d51a015SLorenzo Bianconi 22537d51a015SLorenzo Bianconi return nxmit; 2254b0a43db9SLorenzo Bianconi } 2255b0a43db9SLorenzo Bianconi 2256b0a43db9SLorenzo Bianconi static int 22570db51da7SLorenzo Bianconi mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2258320d5441SLorenzo Bianconi struct bpf_prog *prog, struct xdp_buff *xdp, 22597d1643ebSLorenzo Bianconi u32 frame_sz, struct mvneta_stats *stats) 22600db51da7SLorenzo Bianconi { 22617d1643ebSLorenzo Bianconi unsigned int len, data_len, sync; 22628c4df83fSLorenzo Bianconi u32 ret, act; 22638c4df83fSLorenzo Bianconi 22648c4df83fSLorenzo Bianconi len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 22657d1643ebSLorenzo Bianconi data_len = xdp->data_end - xdp->data; 22668c4df83fSLorenzo Bianconi act = bpf_prog_run_xdp(prog, xdp); 22670db51da7SLorenzo Bianconi 2268494f44d5SJesper Dangaard Brouer /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 2269494f44d5SJesper Dangaard Brouer sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2270494f44d5SJesper Dangaard Brouer sync = max(sync, len); 2271494f44d5SJesper Dangaard Brouer 22720db51da7SLorenzo Bianconi switch (act) { 22730db51da7SLorenzo Bianconi case XDP_PASS: 22743d866523SLorenzo Bianconi stats->xdp_pass++; 2275320d5441SLorenzo Bianconi return MVNETA_XDP_PASS; 22760db51da7SLorenzo Bianconi case XDP_REDIRECT: { 22770db51da7SLorenzo Bianconi int err; 22780db51da7SLorenzo Bianconi 22790db51da7SLorenzo Bianconi err = xdp_do_redirect(pp->dev, xdp, prog); 228015070919SJesper Dangaard Brouer if (unlikely(err)) { 2281d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, xdp, sync); 22820db51da7SLorenzo Bianconi ret = MVNETA_XDP_DROPPED; 22830db51da7SLorenzo Bianconi } else { 22840db51da7SLorenzo Bianconi ret = MVNETA_XDP_REDIR; 22853d866523SLorenzo Bianconi stats->xdp_redirect++; 22860db51da7SLorenzo Bianconi } 22870db51da7SLorenzo Bianconi break; 22880db51da7SLorenzo Bianconi } 2289b0a43db9SLorenzo Bianconi case XDP_TX: 2290b0a43db9SLorenzo Bianconi ret = mvneta_xdp_xmit_back(pp, xdp); 22917d1643ebSLorenzo Bianconi if (ret != MVNETA_XDP_TX) 2292d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, xdp, sync); 2293b0a43db9SLorenzo Bianconi break; 22940db51da7SLorenzo Bianconi default: 2295c8064e5bSPaolo Abeni bpf_warn_invalid_xdp_action(pp->dev, prog, act); 2296df561f66SGustavo A. R. Silva fallthrough; 22970db51da7SLorenzo Bianconi case XDP_ABORTED: 22980db51da7SLorenzo Bianconi trace_xdp_exception(pp->dev, prog, act); 2299df561f66SGustavo A. R. 
Silva fallthrough; 23000db51da7SLorenzo Bianconi case XDP_DROP: 2301d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, xdp, sync); 23020db51da7SLorenzo Bianconi ret = MVNETA_XDP_DROPPED; 23033d866523SLorenzo Bianconi stats->xdp_drop++; 23040db51da7SLorenzo Bianconi break; 23050db51da7SLorenzo Bianconi } 23060db51da7SLorenzo Bianconi 23077d1643ebSLorenzo Bianconi stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; 2308320d5441SLorenzo Bianconi stats->rx_packets++; 2309320d5441SLorenzo Bianconi 23100db51da7SLorenzo Bianconi return ret; 23110db51da7SLorenzo Bianconi } 23120db51da7SLorenzo Bianconi 2313afda408bSLorenzo Bianconi static void 23148dc9a088SLorenzo Bianconi mvneta_swbm_rx_frame(struct mvneta_port *pp, 23158dc9a088SLorenzo Bianconi struct mvneta_rx_desc *rx_desc, 23168dc9a088SLorenzo Bianconi struct mvneta_rx_queue *rxq, 2317c7a3a8cdSLorenzo Bianconi struct xdp_buff *xdp, int *size, 23183a8c4ad1SLorenzo Bianconi struct page *page) 23198dc9a088SLorenzo Bianconi { 23208dc9a088SLorenzo Bianconi unsigned char *data = page_address(page); 23218dc9a088SLorenzo Bianconi int data_len = -MVNETA_MH_SIZE, len; 23228dc9a088SLorenzo Bianconi struct net_device *dev = pp->dev; 23238dc9a088SLorenzo Bianconi enum dma_data_direction dma_dir; 23248dc9a088SLorenzo Bianconi 2325879456beSLorenzo Bianconi if (*size > MVNETA_MAX_RX_BUF_SIZE) { 23268dc9a088SLorenzo Bianconi len = MVNETA_MAX_RX_BUF_SIZE; 23278dc9a088SLorenzo Bianconi data_len += len; 23288dc9a088SLorenzo Bianconi } else { 2329879456beSLorenzo Bianconi len = *size; 23308dc9a088SLorenzo Bianconi data_len += len - ETH_FCS_LEN; 23318dc9a088SLorenzo Bianconi } 2332879456beSLorenzo Bianconi *size = *size - len; 23338dc9a088SLorenzo Bianconi 23348dc9a088SLorenzo Bianconi dma_dir = page_pool_get_dma_dir(rxq->page_pool); 23358dc9a088SLorenzo Bianconi dma_sync_single_for_cpu(dev->dev.parent, 23368dc9a088SLorenzo Bianconi rx_desc->buf_phys_addr, 23378dc9a088SLorenzo Bianconi len, dma_dir); 23388dc9a088SLorenzo Bianconi 2339879456beSLorenzo Bianconi rx_desc->buf_phys_addr = 0; 2340879456beSLorenzo Bianconi 2341fa383f6bSLorenzo Bianconi /* Prefetch header */ 2342fa383f6bSLorenzo Bianconi prefetch(data); 234376a67694SLorenzo Bianconi xdp_buff_clear_frags_flag(xdp); 2344be9df4afSLorenzo Bianconi xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, 2345be9df4afSLorenzo Bianconi data_len, false); 23468dc9a088SLorenzo Bianconi } 23478dc9a088SLorenzo Bianconi 23488dc9a088SLorenzo Bianconi static void 23498dc9a088SLorenzo Bianconi mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, 23508dc9a088SLorenzo Bianconi struct mvneta_rx_desc *rx_desc, 23518dc9a088SLorenzo Bianconi struct mvneta_rx_queue *rxq, 2352c7a3a8cdSLorenzo Bianconi struct xdp_buff *xdp, int *size, 23538dc9a088SLorenzo Bianconi struct page *page) 23548dc9a088SLorenzo Bianconi { 2355d094c985SLorenzo Bianconi struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 23568dc9a088SLorenzo Bianconi struct net_device *dev = pp->dev; 23578dc9a088SLorenzo Bianconi enum dma_data_direction dma_dir; 23588dc9a088SLorenzo Bianconi int data_len, len; 23598dc9a088SLorenzo Bianconi 2360c7a3a8cdSLorenzo Bianconi if (*size > MVNETA_MAX_RX_BUF_SIZE) { 23618dc9a088SLorenzo Bianconi len = MVNETA_MAX_RX_BUF_SIZE; 23628dc9a088SLorenzo Bianconi data_len = len; 23638dc9a088SLorenzo Bianconi } else { 2364c7a3a8cdSLorenzo Bianconi len = *size; 23658dc9a088SLorenzo Bianconi data_len = len - ETH_FCS_LEN; 23668dc9a088SLorenzo Bianconi } 23678dc9a088SLorenzo Bianconi dma_dir = 
page_pool_get_dma_dir(rxq->page_pool); 23688dc9a088SLorenzo Bianconi dma_sync_single_for_cpu(dev->dev.parent, 23698dc9a088SLorenzo Bianconi rx_desc->buf_phys_addr, 23708dc9a088SLorenzo Bianconi len, dma_dir); 23719c79a8abSLorenzo Bianconi rx_desc->buf_phys_addr = 0; 2372ca0e0146SLorenzo Bianconi 2373d094c985SLorenzo Bianconi if (!xdp_buff_has_frags(xdp)) 2374d094c985SLorenzo Bianconi sinfo->nr_frags = 0; 2375d094c985SLorenzo Bianconi 2376d094c985SLorenzo Bianconi if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { 2377d094c985SLorenzo Bianconi skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; 2378ca0e0146SLorenzo Bianconi 2379b51f4113SYunsheng Lin skb_frag_fill_page_desc(frag, page, 2380b51f4113SYunsheng Lin pp->rx_offset_correction, data_len); 238176a67694SLorenzo Bianconi 2382ed7a58cbSLorenzo Bianconi if (!xdp_buff_has_frags(xdp)) { 2383ed7a58cbSLorenzo Bianconi sinfo->xdp_frags_size = *size; 238476a67694SLorenzo Bianconi xdp_buff_set_frags_flag(xdp); 2385ed7a58cbSLorenzo Bianconi } 2386ed7a58cbSLorenzo Bianconi if (page_is_pfmemalloc(page)) 2387ed7a58cbSLorenzo Bianconi xdp_buff_set_frag_pfmemalloc(xdp); 23886ff63a15SLorenzo Bianconi } else { 23896ff63a15SLorenzo Bianconi page_pool_put_full_page(rxq->page_pool, page, true); 23906ff63a15SLorenzo Bianconi } 2391c7a3a8cdSLorenzo Bianconi *size -= len; 23928dc9a088SLorenzo Bianconi } 23938dc9a088SLorenzo Bianconi 2394ca0e0146SLorenzo Bianconi static struct sk_buff * 2395e4017570SMatteo Croce mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, 2396ca0e0146SLorenzo Bianconi struct xdp_buff *xdp, u32 desc_status) 2397ca0e0146SLorenzo Bianconi { 2398ca0e0146SLorenzo Bianconi struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2399ca0e0146SLorenzo Bianconi struct sk_buff *skb; 240076a67694SLorenzo Bianconi u8 num_frags; 240176a67694SLorenzo Bianconi 240276a67694SLorenzo Bianconi if (unlikely(xdp_buff_has_frags(xdp))) 240376a67694SLorenzo Bianconi num_frags = sinfo->nr_frags; 2404ca0e0146SLorenzo Bianconi 2405ca0e0146SLorenzo Bianconi skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2406ca0e0146SLorenzo Bianconi if (!skb) 2407ca0e0146SLorenzo Bianconi return ERR_PTR(-ENOMEM); 2408ca0e0146SLorenzo Bianconi 240957f05bc2SYunsheng Lin skb_mark_for_recycle(skb); 2410ca0e0146SLorenzo Bianconi 2411ca0e0146SLorenzo Bianconi skb_reserve(skb, xdp->data - xdp->data_hard_start); 2412ca0e0146SLorenzo Bianconi skb_put(skb, xdp->data_end - xdp->data); 2413aff0824dSLorenzo Bianconi skb->ip_summed = mvneta_rx_csum(pp, desc_status); 2414ca0e0146SLorenzo Bianconi 2415ed7a58cbSLorenzo Bianconi if (unlikely(xdp_buff_has_frags(xdp))) 2416ed7a58cbSLorenzo Bianconi xdp_update_skb_shared_info(skb, num_frags, 2417ed7a58cbSLorenzo Bianconi sinfo->xdp_frags_size, 2418ed7a58cbSLorenzo Bianconi num_frags * xdp->frame_sz, 2419ed7a58cbSLorenzo Bianconi xdp_buff_is_frag_pfmemalloc(xdp)); 242076a67694SLorenzo Bianconi 2421ca0e0146SLorenzo Bianconi return skb; 2422ca0e0146SLorenzo Bianconi } 2423ca0e0146SLorenzo Bianconi 2424dc35a10fSMarcin Wojtas /* Main rx processing when using software buffer management */ 24257a86f05fSAndrew Lunn static int mvneta_rx_swbm(struct napi_struct *napi, 2426562e2f46SYelena Krivosheev struct mvneta_port *pp, int budget, 2427c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 2428c5aff182SThomas Petazzoni { 2429c7a3a8cdSLorenzo Bianconi int rx_proc = 0, rx_todo, refill, size = 0; 2430c5aff182SThomas Petazzoni struct net_device *dev = pp->dev; 2431320d5441SLorenzo Bianconi struct mvneta_stats ps = {}; 
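	/* Note on the receive loop below: a frame larger than
	 * MVNETA_MAX_RX_BUF_SIZE arrives as a FIRST descriptor followed by
	 * middle/LAST descriptors.  The first buffer seeds the xdp_buff, each
	 * following buffer is attached as an XDP fragment, and only once the
	 * LAST descriptor is seen is the optional XDP program run and an skb
	 * built around the pages (marked for page_pool recycling).
	 */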
24320db51da7SLorenzo Bianconi struct bpf_prog *xdp_prog; 24337d1643ebSLorenzo Bianconi u32 desc_status, frame_sz; 243405c748f7SLorenzo Bianconi struct xdp_buff xdp_buf; 243505c748f7SLorenzo Bianconi 243643b5169dSLorenzo Bianconi xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); 243705c748f7SLorenzo Bianconi xdp_buf.data_hard_start = NULL; 2438c5aff182SThomas Petazzoni 2439c5aff182SThomas Petazzoni /* Get number of received packets */ 2440562e2f46SYelena Krivosheev rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); 2441c5aff182SThomas Petazzoni 24420db51da7SLorenzo Bianconi xdp_prog = READ_ONCE(pp->xdp_prog); 24430db51da7SLorenzo Bianconi 2444c5aff182SThomas Petazzoni /* Fairness NAPI loop */ 24458dc9a088SLorenzo Bianconi while (rx_proc < budget && rx_proc < rx_todo) { 2446c5aff182SThomas Petazzoni struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 24478dc9a088SLorenzo Bianconi u32 rx_status, index; 2448ca0e0146SLorenzo Bianconi struct sk_buff *skb; 24497e47fd84SGregory CLEMENT struct page *page; 2450c5aff182SThomas Petazzoni 2451f88bee1cSGregory CLEMENT index = rx_desc - rxq->descs; 24527e47fd84SGregory CLEMENT page = (struct page *)rxq->buf_virt_addr[index]; 2453c5aff182SThomas Petazzoni 2454562e2f46SYelena Krivosheev rx_status = rx_desc->status; 2455562e2f46SYelena Krivosheev rx_proc++; 2456562e2f46SYelena Krivosheev rxq->refill_num++; 2457562e2f46SYelena Krivosheev 2458562e2f46SYelena Krivosheev if (rx_status & MVNETA_RXD_FIRST_DESC) { 2459562e2f46SYelena Krivosheev /* Check errors only for FIRST descriptor */ 2460562e2f46SYelena Krivosheev if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 24612eecb2e0SYelena Krivosheev mvneta_rx_error(pp, rx_desc); 2462ca0e0146SLorenzo Bianconi goto next; 2463c5aff182SThomas Petazzoni } 2464c5aff182SThomas Petazzoni 2465c7a3a8cdSLorenzo Bianconi size = rx_desc->data_size; 2466c7a3a8cdSLorenzo Bianconi frame_sz = size - ETH_FCS_LEN; 2467879456beSLorenzo Bianconi desc_status = rx_status; 24687d1643ebSLorenzo Bianconi 2469c7a3a8cdSLorenzo Bianconi mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, 24703a8c4ad1SLorenzo Bianconi &size, page); 2471562e2f46SYelena Krivosheev } else { 2472b6e11785SLorenzo Bianconi if (unlikely(!xdp_buf.data_hard_start)) { 2473b6e11785SLorenzo Bianconi rx_desc->buf_phys_addr = 0; 2474b6e11785SLorenzo Bianconi page_pool_put_full_page(rxq->page_pool, page, 2475b6e11785SLorenzo Bianconi true); 2476039fbc47SLorenzo Bianconi goto next; 2477b6e11785SLorenzo Bianconi } 2478ca0e0146SLorenzo Bianconi 2479ca0e0146SLorenzo Bianconi mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, 2480d094c985SLorenzo Bianconi &size, page); 2481562e2f46SYelena Krivosheev } /* Middle or Last descriptor */ 2482562e2f46SYelena Krivosheev 2483562e2f46SYelena Krivosheev if (!(rx_status & MVNETA_RXD_LAST_DESC)) 2484562e2f46SYelena Krivosheev /* no last descriptor this time */ 2485562e2f46SYelena Krivosheev continue; 2486562e2f46SYelena Krivosheev 2487c7a3a8cdSLorenzo Bianconi if (size) { 2488d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); 2489ca0e0146SLorenzo Bianconi goto next; 2490562e2f46SYelena Krivosheev } 2491320d5441SLorenzo Bianconi 2492afda408bSLorenzo Bianconi if (xdp_prog && 24937d1643ebSLorenzo Bianconi mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) 2494afda408bSLorenzo Bianconi goto next; 2495afda408bSLorenzo Bianconi 2496e4017570SMatteo Croce skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); 2497ca0e0146SLorenzo Bianconi if (IS_ERR(skb)) { 2498ca0e0146SLorenzo Bianconi struct 
mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2499ca0e0146SLorenzo Bianconi 2500d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); 2501ca0e0146SLorenzo Bianconi 2502ca0e0146SLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 2503ca0e0146SLorenzo Bianconi stats->es.skb_alloc_error++; 2504ca0e0146SLorenzo Bianconi stats->rx_dropped++; 2505ca0e0146SLorenzo Bianconi u64_stats_update_end(&stats->syncp); 2506ca0e0146SLorenzo Bianconi 2507ca0e0146SLorenzo Bianconi goto next; 2508ca0e0146SLorenzo Bianconi } 2509ca0e0146SLorenzo Bianconi 2510ca0e0146SLorenzo Bianconi ps.rx_bytes += skb->len; 2511320d5441SLorenzo Bianconi ps.rx_packets++; 2512c5aff182SThomas Petazzoni 2513ca0e0146SLorenzo Bianconi skb->protocol = eth_type_trans(skb, dev); 2514ca0e0146SLorenzo Bianconi napi_gro_receive(napi, skb); 2515ca0e0146SLorenzo Bianconi next: 2516ca0e0146SLorenzo Bianconi xdp_buf.data_hard_start = NULL; 2517c5aff182SThomas Petazzoni } 25180db51da7SLorenzo Bianconi 2519039fbc47SLorenzo Bianconi if (xdp_buf.data_hard_start) 2520d094c985SLorenzo Bianconi mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); 2521ca0e0146SLorenzo Bianconi 25226c8a8cfdSLorenzo Bianconi if (ps.xdp_redirect) 25230db51da7SLorenzo Bianconi xdp_do_flush_map(); 2524c5aff182SThomas Petazzoni 2525320d5441SLorenzo Bianconi if (ps.rx_packets) 2526320d5441SLorenzo Bianconi mvneta_update_stats(pp, &ps); 2527dc4277ddSwilly tarreau 2528562e2f46SYelena Krivosheev /* return some buffers to hardware queue, one at a time is too slow */ 2529562e2f46SYelena Krivosheev refill = mvneta_rx_refill_queue(pp, rxq); 2530c5aff182SThomas Petazzoni 2531562e2f46SYelena Krivosheev /* Update rxq management counters */ 2532562e2f46SYelena Krivosheev mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); 2533562e2f46SYelena Krivosheev 2534320d5441SLorenzo Bianconi return ps.rx_packets; 2535c5aff182SThomas Petazzoni } 2536c5aff182SThomas Petazzoni 2537dc35a10fSMarcin Wojtas /* Main rx processing when using hardware buffer management */ 25387a86f05fSAndrew Lunn static int mvneta_rx_hwbm(struct napi_struct *napi, 25397a86f05fSAndrew Lunn struct mvneta_port *pp, int rx_todo, 2540dc35a10fSMarcin Wojtas struct mvneta_rx_queue *rxq) 2541dc35a10fSMarcin Wojtas { 2542dc35a10fSMarcin Wojtas struct net_device *dev = pp->dev; 2543dc35a10fSMarcin Wojtas int rx_done; 2544dc35a10fSMarcin Wojtas u32 rcvd_pkts = 0; 2545dc35a10fSMarcin Wojtas u32 rcvd_bytes = 0; 2546dc35a10fSMarcin Wojtas 2547dc35a10fSMarcin Wojtas /* Get number of received packets */ 2548dc35a10fSMarcin Wojtas rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 2549dc35a10fSMarcin Wojtas 2550dc35a10fSMarcin Wojtas if (rx_todo > rx_done) 2551dc35a10fSMarcin Wojtas rx_todo = rx_done; 2552dc35a10fSMarcin Wojtas 2553dc35a10fSMarcin Wojtas rx_done = 0; 2554dc35a10fSMarcin Wojtas 2555dc35a10fSMarcin Wojtas /* Fairness NAPI loop */ 2556dc35a10fSMarcin Wojtas while (rx_done < rx_todo) { 2557dc35a10fSMarcin Wojtas struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2558dc35a10fSMarcin Wojtas struct mvneta_bm_pool *bm_pool = NULL; 2559dc35a10fSMarcin Wojtas struct sk_buff *skb; 2560dc35a10fSMarcin Wojtas unsigned char *data; 2561dc35a10fSMarcin Wojtas dma_addr_t phys_addr; 2562dc35a10fSMarcin Wojtas u32 rx_status, frag_size; 2563dc35a10fSMarcin Wojtas int rx_bytes, err; 2564dc35a10fSMarcin Wojtas u8 pool_id; 2565dc35a10fSMarcin Wojtas 2566dc35a10fSMarcin Wojtas rx_done++; 2567dc35a10fSMarcin Wojtas rx_status = rx_desc->status; 2568dc35a10fSMarcin Wojtas rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + 
MVNETA_MH_SIZE); 2569f88bee1cSGregory CLEMENT data = (u8 *)(uintptr_t)rx_desc->buf_cookie; 2570dc35a10fSMarcin Wojtas phys_addr = rx_desc->buf_phys_addr; 2571dc35a10fSMarcin Wojtas pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2572dc35a10fSMarcin Wojtas bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2573dc35a10fSMarcin Wojtas 2574dc35a10fSMarcin Wojtas if (!mvneta_rxq_desc_is_first_last(rx_status) || 2575dc35a10fSMarcin Wojtas (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 2576dc35a10fSMarcin Wojtas err_drop_frame_ret_pool: 2577dc35a10fSMarcin Wojtas /* Return the buffer to the pool */ 2578dc35a10fSMarcin Wojtas mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2579dc35a10fSMarcin Wojtas rx_desc->buf_phys_addr); 2580dc35a10fSMarcin Wojtas err_drop_frame: 2581dc35a10fSMarcin Wojtas mvneta_rx_error(pp, rx_desc); 2582dc35a10fSMarcin Wojtas /* leave the descriptor untouched */ 2583dc35a10fSMarcin Wojtas continue; 2584dc35a10fSMarcin Wojtas } 2585dc35a10fSMarcin Wojtas 2586dc35a10fSMarcin Wojtas if (rx_bytes <= rx_copybreak) { 2587dc35a10fSMarcin Wojtas /* better copy a small frame and not unmap the DMA region */ 2588dc35a10fSMarcin Wojtas skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 2589dc35a10fSMarcin Wojtas if (unlikely(!skb)) 2590dc35a10fSMarcin Wojtas goto err_drop_frame_ret_pool; 2591dc35a10fSMarcin Wojtas 2592a8fef9baSRussell King dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, 2593dc35a10fSMarcin Wojtas rx_desc->buf_phys_addr, 2594dc35a10fSMarcin Wojtas MVNETA_MH_SIZE + NET_SKB_PAD, 2595dc35a10fSMarcin Wojtas rx_bytes, 2596dc35a10fSMarcin Wojtas DMA_FROM_DEVICE); 259759ae1d12SJohannes Berg skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, 2598dc35a10fSMarcin Wojtas rx_bytes); 2599dc35a10fSMarcin Wojtas 2600dc35a10fSMarcin Wojtas skb->protocol = eth_type_trans(skb, dev); 2601aff0824dSLorenzo Bianconi skb->ip_summed = mvneta_rx_csum(pp, rx_status); 26027a86f05fSAndrew Lunn napi_gro_receive(napi, skb); 2603dc35a10fSMarcin Wojtas 2604dc35a10fSMarcin Wojtas rcvd_pkts++; 2605dc35a10fSMarcin Wojtas rcvd_bytes += rx_bytes; 2606dc35a10fSMarcin Wojtas 2607dc35a10fSMarcin Wojtas /* Return the buffer to the pool */ 2608dc35a10fSMarcin Wojtas mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2609dc35a10fSMarcin Wojtas rx_desc->buf_phys_addr); 2610dc35a10fSMarcin Wojtas 2611dc35a10fSMarcin Wojtas /* leave the descriptor and buffer untouched */ 2612dc35a10fSMarcin Wojtas continue; 2613dc35a10fSMarcin Wojtas } 2614dc35a10fSMarcin Wojtas 2615dc35a10fSMarcin Wojtas /* Refill processing */ 2616baa11ebcSGregory CLEMENT err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); 2617dc35a10fSMarcin Wojtas if (err) { 26189ac41f3cSLorenzo Bianconi struct mvneta_pcpu_stats *stats; 26199ac41f3cSLorenzo Bianconi 2620dc35a10fSMarcin Wojtas netdev_err(dev, "Linux processing - Can't refill\n"); 26219ac41f3cSLorenzo Bianconi 26229ac41f3cSLorenzo Bianconi stats = this_cpu_ptr(pp->stats); 26239ac41f3cSLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 26249ac41f3cSLorenzo Bianconi stats->es.refill_error++; 26259ac41f3cSLorenzo Bianconi u64_stats_update_end(&stats->syncp); 26269ac41f3cSLorenzo Bianconi 2627dc35a10fSMarcin Wojtas goto err_drop_frame_ret_pool; 2628dc35a10fSMarcin Wojtas } 2629dc35a10fSMarcin Wojtas 2630baa11ebcSGregory CLEMENT frag_size = bm_pool->hwbm_pool.frag_size; 2631dc35a10fSMarcin Wojtas 2632dc35a10fSMarcin Wojtas skb = build_skb(data, frag_size > PAGE_SIZE ? 
0 : frag_size); 2633dc35a10fSMarcin Wojtas 2634dc35a10fSMarcin Wojtas /* After refill old buffer has to be unmapped regardless 2635dc35a10fSMarcin Wojtas * the skb is successfully built or not. 2636dc35a10fSMarcin Wojtas */ 2637dc35a10fSMarcin Wojtas dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, 2638dc35a10fSMarcin Wojtas bm_pool->buf_size, DMA_FROM_DEVICE); 2639dc35a10fSMarcin Wojtas if (!skb) 2640dc35a10fSMarcin Wojtas goto err_drop_frame; 2641dc35a10fSMarcin Wojtas 2642dc35a10fSMarcin Wojtas rcvd_pkts++; 2643dc35a10fSMarcin Wojtas rcvd_bytes += rx_bytes; 2644dc35a10fSMarcin Wojtas 2645dc35a10fSMarcin Wojtas /* Linux processing */ 2646dc35a10fSMarcin Wojtas skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 2647dc35a10fSMarcin Wojtas skb_put(skb, rx_bytes); 2648dc35a10fSMarcin Wojtas 2649dc35a10fSMarcin Wojtas skb->protocol = eth_type_trans(skb, dev); 2650aff0824dSLorenzo Bianconi skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2651dc35a10fSMarcin Wojtas 26527a86f05fSAndrew Lunn napi_gro_receive(napi, skb); 2653dc35a10fSMarcin Wojtas } 2654dc35a10fSMarcin Wojtas 265569de66fcSLorenzo Bianconi if (rcvd_pkts) { 265669de66fcSLorenzo Bianconi struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 265769de66fcSLorenzo Bianconi 265869de66fcSLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 2659320d5441SLorenzo Bianconi stats->es.ps.rx_packets += rcvd_pkts; 2660320d5441SLorenzo Bianconi stats->es.ps.rx_bytes += rcvd_bytes; 266169de66fcSLorenzo Bianconi u64_stats_update_end(&stats->syncp); 266269de66fcSLorenzo Bianconi } 2663dc35a10fSMarcin Wojtas 2664dc35a10fSMarcin Wojtas /* Update rxq management counters */ 2665dc35a10fSMarcin Wojtas mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 2666dc35a10fSMarcin Wojtas 2667dc35a10fSMarcin Wojtas return rx_done; 2668dc35a10fSMarcin Wojtas } 2669dc35a10fSMarcin Wojtas 267033f4cefbSRussell King (Oracle) static void mvneta_free_tso_hdrs(struct mvneta_port *pp, 267133f4cefbSRussell King (Oracle) struct mvneta_tx_queue *txq) 267233f4cefbSRussell King (Oracle) { 267333f4cefbSRussell King (Oracle) struct device *dev = pp->dev->dev.parent; 267433f4cefbSRussell King (Oracle) int i; 267533f4cefbSRussell King (Oracle) 267633f4cefbSRussell King (Oracle) for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) { 267733f4cefbSRussell King (Oracle) if (txq->tso_hdrs[i]) { 267833f4cefbSRussell King (Oracle) dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE, 267933f4cefbSRussell King (Oracle) txq->tso_hdrs[i], 268033f4cefbSRussell King (Oracle) txq->tso_hdrs_phys[i]); 268133f4cefbSRussell King (Oracle) txq->tso_hdrs[i] = NULL; 268233f4cefbSRussell King (Oracle) } 268333f4cefbSRussell King (Oracle) } 268433f4cefbSRussell King (Oracle) } 268533f4cefbSRussell King (Oracle) 268633f4cefbSRussell King (Oracle) static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp, 268733f4cefbSRussell King (Oracle) struct mvneta_tx_queue *txq) 268833f4cefbSRussell King (Oracle) { 268933f4cefbSRussell King (Oracle) struct device *dev = pp->dev->dev.parent; 269033f4cefbSRussell King (Oracle) int i, num; 269133f4cefbSRussell King (Oracle) 269233f4cefbSRussell King (Oracle) num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE); 269333f4cefbSRussell King (Oracle) for (i = 0; i < num; i++) { 269433f4cefbSRussell King (Oracle) txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE, 269533f4cefbSRussell King (Oracle) &txq->tso_hdrs_phys[i], 269633f4cefbSRussell King (Oracle) GFP_KERNEL); 269733f4cefbSRussell King (Oracle) if (!txq->tso_hdrs[i]) { 269833f4cefbSRussell King (Oracle) 
mvneta_free_tso_hdrs(pp, txq); 269933f4cefbSRussell King (Oracle) return -ENOMEM; 270033f4cefbSRussell King (Oracle) } 270133f4cefbSRussell King (Oracle) } 270233f4cefbSRussell King (Oracle) 270333f4cefbSRussell King (Oracle) return 0; 270433f4cefbSRussell King (Oracle) } 270533f4cefbSRussell King (Oracle) 270633f4cefbSRussell King (Oracle) static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma) 270733f4cefbSRussell King (Oracle) { 270833f4cefbSRussell King (Oracle) int index, offset; 270933f4cefbSRussell King (Oracle) 271033f4cefbSRussell King (Oracle) index = txq->txq_put_index / MVNETA_TSO_PER_PAGE; 271133f4cefbSRussell King (Oracle) offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE; 271233f4cefbSRussell King (Oracle) 271333f4cefbSRussell King (Oracle) *dma = txq->tso_hdrs_phys[index] + offset; 271433f4cefbSRussell King (Oracle) 271533f4cefbSRussell King (Oracle) return txq->tso_hdrs[index] + offset; 271633f4cefbSRussell King (Oracle) } 271733f4cefbSRussell King (Oracle) 2718d41eb555SRussell King (Oracle) static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq, 2719d41eb555SRussell King (Oracle) struct tso_t *tso, int size, bool is_last) 27202adb719dSEzequiel Garcia { 27219e58c8b4SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 272233f4cefbSRussell King (Oracle) int hdr_len = skb_tcp_all_headers(skb); 27239e58c8b4SLorenzo Bianconi struct mvneta_tx_desc *tx_desc; 272433f4cefbSRussell King (Oracle) dma_addr_t hdr_phys; 2725d41eb555SRussell King (Oracle) char *hdr; 2726d41eb555SRussell King (Oracle) 272733f4cefbSRussell King (Oracle) hdr = mvneta_get_tso_hdr(txq, &hdr_phys); 2728d41eb555SRussell King (Oracle) tso_build_hdr(skb, hdr, tso, size, is_last); 27292adb719dSEzequiel Garcia 27302adb719dSEzequiel Garcia tx_desc = mvneta_txq_next_desc_get(txq); 27312adb719dSEzequiel Garcia tx_desc->data_size = hdr_len; 273220d446f2SYuval Shaia tx_desc->command = mvneta_skb_tx_csum(skb); 27332adb719dSEzequiel Garcia tx_desc->command |= MVNETA_TXD_F_DESC; 273433f4cefbSRussell King (Oracle) tx_desc->buf_phys_addr = hdr_phys; 2735b0bd1b07SRussell King (Oracle) buf->type = MVNETA_TYPE_TSO; 27369e58c8b4SLorenzo Bianconi buf->skb = NULL; 27379e58c8b4SLorenzo Bianconi 27382adb719dSEzequiel Garcia mvneta_txq_inc_put(txq); 27392adb719dSEzequiel Garcia } 27402adb719dSEzequiel Garcia 27412adb719dSEzequiel Garcia static inline int 27422adb719dSEzequiel Garcia mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 27432adb719dSEzequiel Garcia struct sk_buff *skb, char *data, int size, 27442adb719dSEzequiel Garcia bool last_tcp, bool is_last) 27452adb719dSEzequiel Garcia { 27469e58c8b4SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 27472adb719dSEzequiel Garcia struct mvneta_tx_desc *tx_desc; 27482adb719dSEzequiel Garcia 27492adb719dSEzequiel Garcia tx_desc = mvneta_txq_next_desc_get(txq); 27502adb719dSEzequiel Garcia tx_desc->data_size = size; 27512adb719dSEzequiel Garcia tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, 27522adb719dSEzequiel Garcia size, DMA_TO_DEVICE); 27532adb719dSEzequiel Garcia if (unlikely(dma_mapping_error(dev->dev.parent, 27542adb719dSEzequiel Garcia tx_desc->buf_phys_addr))) { 27552adb719dSEzequiel Garcia mvneta_txq_desc_put(txq); 27562adb719dSEzequiel Garcia return -ENOMEM; 27572adb719dSEzequiel Garcia } 27582adb719dSEzequiel Garcia 27592adb719dSEzequiel Garcia tx_desc->command = 0; 27609e58c8b4SLorenzo Bianconi buf->type = 
MVNETA_TYPE_SKB; 27619e58c8b4SLorenzo Bianconi buf->skb = NULL; 27622adb719dSEzequiel Garcia 27632adb719dSEzequiel Garcia if (last_tcp) { 27642adb719dSEzequiel Garcia /* last descriptor in the TCP packet */ 27652adb719dSEzequiel Garcia tx_desc->command = MVNETA_TXD_L_DESC; 27662adb719dSEzequiel Garcia 27672adb719dSEzequiel Garcia /* last descriptor in SKB */ 27682adb719dSEzequiel Garcia if (is_last) 27699e58c8b4SLorenzo Bianconi buf->skb = skb; 27702adb719dSEzequiel Garcia } 27712adb719dSEzequiel Garcia mvneta_txq_inc_put(txq); 27722adb719dSEzequiel Garcia return 0; 27732adb719dSEzequiel Garcia } 27742adb719dSEzequiel Garcia 2775fef99e84SRussell King (Oracle) static void mvneta_release_descs(struct mvneta_port *pp, 2776fef99e84SRussell King (Oracle) struct mvneta_tx_queue *txq, 2777fef99e84SRussell King (Oracle) int first, int num) 2778fef99e84SRussell King (Oracle) { 2779fef99e84SRussell King (Oracle) int desc_idx, i; 2780fef99e84SRussell King (Oracle) 2781fef99e84SRussell King (Oracle) desc_idx = first + num; 2782fef99e84SRussell King (Oracle) if (desc_idx >= txq->size) 2783fef99e84SRussell King (Oracle) desc_idx -= txq->size; 2784fef99e84SRussell King (Oracle) 2785fef99e84SRussell King (Oracle) for (i = num; i >= 0; i--) { 2786fef99e84SRussell King (Oracle) struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx; 2787f00ba4f4SRussell King (Oracle) struct mvneta_tx_buf *buf = &txq->buf[desc_idx]; 2788fef99e84SRussell King (Oracle) 2789f00ba4f4SRussell King (Oracle) if (buf->type == MVNETA_TYPE_SKB) 2790fef99e84SRussell King (Oracle) dma_unmap_single(pp->dev->dev.parent, 2791fef99e84SRussell King (Oracle) tx_desc->buf_phys_addr, 2792fef99e84SRussell King (Oracle) tx_desc->data_size, 2793fef99e84SRussell King (Oracle) DMA_TO_DEVICE); 2794fef99e84SRussell King (Oracle) 2795fef99e84SRussell King (Oracle) mvneta_txq_desc_put(txq); 2796fef99e84SRussell King (Oracle) 2797fef99e84SRussell King (Oracle) if (desc_idx == 0) 2798fef99e84SRussell King (Oracle) desc_idx = txq->size; 2799fef99e84SRussell King (Oracle) desc_idx -= 1; 2800fef99e84SRussell King (Oracle) } 2801fef99e84SRussell King (Oracle) } 2802fef99e84SRussell King (Oracle) 28032adb719dSEzequiel Garcia static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 28042adb719dSEzequiel Garcia struct mvneta_tx_queue *txq) 28052adb719dSEzequiel Garcia { 2806761b331cSEric Dumazet int hdr_len, total_len, data_left; 2807fef99e84SRussell King (Oracle) int first_desc, desc_count = 0; 28082adb719dSEzequiel Garcia struct mvneta_port *pp = netdev_priv(dev); 28092adb719dSEzequiel Garcia struct tso_t tso; 28102adb719dSEzequiel Garcia 28112adb719dSEzequiel Garcia /* Count needed descriptors */ 28122adb719dSEzequiel Garcia if ((txq->count + tso_count_descs(skb)) >= txq->size) 28132adb719dSEzequiel Garcia return 0; 28142adb719dSEzequiel Garcia 2815504148feSEric Dumazet if (skb_headlen(skb) < skb_tcp_all_headers(skb)) { 2816fa660684SColin Ian King pr_info("*** Is this even possible?\n"); 28172adb719dSEzequiel Garcia return 0; 28182adb719dSEzequiel Garcia } 28192adb719dSEzequiel Garcia 2820fef99e84SRussell King (Oracle) first_desc = txq->txq_put_index; 2821fef99e84SRussell King (Oracle) 28222adb719dSEzequiel Garcia /* Initialize the TSO handler, and prepare the first payload */ 2823761b331cSEric Dumazet hdr_len = tso_start(skb, &tso); 28242adb719dSEzequiel Garcia 28252adb719dSEzequiel Garcia total_len = skb->len - hdr_len; 28262adb719dSEzequiel Garcia while (total_len > 0) { 28272adb719dSEzequiel Garcia data_left = min_t(int, 
skb_shinfo(skb)->gso_size, total_len); 28282adb719dSEzequiel Garcia total_len -= data_left; 28292adb719dSEzequiel Garcia desc_count++; 28302adb719dSEzequiel Garcia 28312adb719dSEzequiel Garcia /* prepare packet headers: MAC + IP + TCP */ 2832d41eb555SRussell King (Oracle) mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0); 28332adb719dSEzequiel Garcia 28342adb719dSEzequiel Garcia while (data_left > 0) { 28352adb719dSEzequiel Garcia int size; 28362adb719dSEzequiel Garcia desc_count++; 28372adb719dSEzequiel Garcia 28382adb719dSEzequiel Garcia size = min_t(int, tso.size, data_left); 28392adb719dSEzequiel Garcia 28402adb719dSEzequiel Garcia if (mvneta_tso_put_data(dev, txq, skb, 28412adb719dSEzequiel Garcia tso.data, size, 28422adb719dSEzequiel Garcia size == data_left, 28432adb719dSEzequiel Garcia total_len == 0)) 28442adb719dSEzequiel Garcia goto err_release; 28452adb719dSEzequiel Garcia data_left -= size; 28462adb719dSEzequiel Garcia 28472adb719dSEzequiel Garcia tso_build_data(skb, &tso, size); 28482adb719dSEzequiel Garcia } 28492adb719dSEzequiel Garcia } 28502adb719dSEzequiel Garcia 28512adb719dSEzequiel Garcia return desc_count; 28522adb719dSEzequiel Garcia 28532adb719dSEzequiel Garcia err_release: 28542adb719dSEzequiel Garcia /* Release all used data descriptors; header descriptors must not 28552adb719dSEzequiel Garcia * be DMA-unmapped. 28562adb719dSEzequiel Garcia */ 2857fef99e84SRussell King (Oracle) mvneta_release_descs(pp, txq, first_desc, desc_count - 1); 28582adb719dSEzequiel Garcia return 0; 28592adb719dSEzequiel Garcia } 28602adb719dSEzequiel Garcia 2861c5aff182SThomas Petazzoni /* Handle tx fragmentation processing */ 2862c5aff182SThomas Petazzoni static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 2863c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 2864c5aff182SThomas Petazzoni { 2865c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc; 28663d4ea02fSEzequiel Garcia int i, nr_frags = skb_shinfo(skb)->nr_frags; 2867fef99e84SRussell King (Oracle) int first_desc = txq->txq_put_index; 2868c5aff182SThomas Petazzoni 28693d4ea02fSEzequiel Garcia for (i = 0; i < nr_frags; i++) { 28709e58c8b4SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2871c5aff182SThomas Petazzoni skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2872d7840976SMatthew Wilcox (Oracle) void *addr = skb_frag_address(frag); 2873c5aff182SThomas Petazzoni 2874c5aff182SThomas Petazzoni tx_desc = mvneta_txq_next_desc_get(txq); 2875d7840976SMatthew Wilcox (Oracle) tx_desc->data_size = skb_frag_size(frag); 2876c5aff182SThomas Petazzoni 2877c5aff182SThomas Petazzoni tx_desc->buf_phys_addr = 2878c5aff182SThomas Petazzoni dma_map_single(pp->dev->dev.parent, addr, 2879c5aff182SThomas Petazzoni tx_desc->data_size, DMA_TO_DEVICE); 2880c5aff182SThomas Petazzoni 2881c5aff182SThomas Petazzoni if (dma_mapping_error(pp->dev->dev.parent, 2882c5aff182SThomas Petazzoni tx_desc->buf_phys_addr)) { 2883c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 2884c5aff182SThomas Petazzoni goto error; 2885c5aff182SThomas Petazzoni } 2886c5aff182SThomas Petazzoni 28873d4ea02fSEzequiel Garcia if (i == nr_frags - 1) { 2888c5aff182SThomas Petazzoni /* Last descriptor */ 2889c5aff182SThomas Petazzoni tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 28909e58c8b4SLorenzo Bianconi buf->skb = skb; 2891c5aff182SThomas Petazzoni } else { 2892c5aff182SThomas Petazzoni /* Descriptor in the middle: Not First, Not Last */ 2893c5aff182SThomas Petazzoni tx_desc->command = 0; 
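			/* Middle fragments carry neither the F nor the L flag and
			 * keep buf->skb NULL; only the last descriptor records the
			 * skb so that mvneta_txq_bufs_free() frees it exactly once.
			 */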
28949e58c8b4SLorenzo Bianconi buf->skb = NULL; 2895c5aff182SThomas Petazzoni } 28969e58c8b4SLorenzo Bianconi buf->type = MVNETA_TYPE_SKB; 28973d4ea02fSEzequiel Garcia mvneta_txq_inc_put(txq); 2898c5aff182SThomas Petazzoni } 2899c5aff182SThomas Petazzoni 2900c5aff182SThomas Petazzoni return 0; 2901c5aff182SThomas Petazzoni 2902c5aff182SThomas Petazzoni error: 2903c5aff182SThomas Petazzoni /* Release all descriptors that were used to map fragments of 29046a20c175SThomas Petazzoni * this packet, as well as the corresponding DMA mappings 29056a20c175SThomas Petazzoni */ 2906fef99e84SRussell King (Oracle) mvneta_release_descs(pp, txq, first_desc, i - 1); 2907c5aff182SThomas Petazzoni return -ENOMEM; 2908c5aff182SThomas Petazzoni } 2909c5aff182SThomas Petazzoni 2910c5aff182SThomas Petazzoni /* Main tx processing */ 2911f03508ceSYueHaibing static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) 2912c5aff182SThomas Petazzoni { 2913c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 2914ee40a116SWilly Tarreau u16 txq_id = skb_get_queue_mapping(skb); 2915ee40a116SWilly Tarreau struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 29169e58c8b4SLorenzo Bianconi struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2917c5aff182SThomas Petazzoni struct mvneta_tx_desc *tx_desc; 29185f478b41SEric Dumazet int len = skb->len; 2919c5aff182SThomas Petazzoni int frags = 0; 2920c5aff182SThomas Petazzoni u32 tx_cmd; 2921c5aff182SThomas Petazzoni 2922c5aff182SThomas Petazzoni if (!netif_running(dev)) 2923c5aff182SThomas Petazzoni goto out; 2924c5aff182SThomas Petazzoni 29252adb719dSEzequiel Garcia if (skb_is_gso(skb)) { 29262adb719dSEzequiel Garcia frags = mvneta_tx_tso(skb, dev, txq); 29272adb719dSEzequiel Garcia goto out; 29282adb719dSEzequiel Garcia } 29292adb719dSEzequiel Garcia 2930c5aff182SThomas Petazzoni frags = skb_shinfo(skb)->nr_frags + 1; 2931c5aff182SThomas Petazzoni 2932c5aff182SThomas Petazzoni /* Get a descriptor for the first part of the packet */ 2933c5aff182SThomas Petazzoni tx_desc = mvneta_txq_next_desc_get(txq); 2934c5aff182SThomas Petazzoni 293520d446f2SYuval Shaia tx_cmd = mvneta_skb_tx_csum(skb); 2936c5aff182SThomas Petazzoni 2937c5aff182SThomas Petazzoni tx_desc->data_size = skb_headlen(skb); 2938c5aff182SThomas Petazzoni 2939c5aff182SThomas Petazzoni tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 2940c5aff182SThomas Petazzoni tx_desc->data_size, 2941c5aff182SThomas Petazzoni DMA_TO_DEVICE); 2942c5aff182SThomas Petazzoni if (unlikely(dma_mapping_error(dev->dev.parent, 2943c5aff182SThomas Petazzoni tx_desc->buf_phys_addr))) { 2944c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 2945c5aff182SThomas Petazzoni frags = 0; 2946c5aff182SThomas Petazzoni goto out; 2947c5aff182SThomas Petazzoni } 2948c5aff182SThomas Petazzoni 29499e58c8b4SLorenzo Bianconi buf->type = MVNETA_TYPE_SKB; 2950c5aff182SThomas Petazzoni if (frags == 1) { 2951c5aff182SThomas Petazzoni /* First and Last descriptor */ 2952c5aff182SThomas Petazzoni tx_cmd |= MVNETA_TXD_FLZ_DESC; 2953c5aff182SThomas Petazzoni tx_desc->command = tx_cmd; 29549e58c8b4SLorenzo Bianconi buf->skb = skb; 2955c5aff182SThomas Petazzoni mvneta_txq_inc_put(txq); 2956c5aff182SThomas Petazzoni } else { 2957c5aff182SThomas Petazzoni /* First but not Last */ 2958c5aff182SThomas Petazzoni tx_cmd |= MVNETA_TXD_F_DESC; 29599e58c8b4SLorenzo Bianconi buf->skb = NULL; 2960c5aff182SThomas Petazzoni mvneta_txq_inc_put(txq); 2961c5aff182SThomas Petazzoni tx_desc->command = tx_cmd; 2962c5aff182SThomas 
Petazzoni /* Continue with other skb fragments */ 2963c5aff182SThomas Petazzoni if (mvneta_tx_frag_process(pp, skb, txq)) { 2964c5aff182SThomas Petazzoni dma_unmap_single(dev->dev.parent, 2965c5aff182SThomas Petazzoni tx_desc->buf_phys_addr, 2966c5aff182SThomas Petazzoni tx_desc->data_size, 2967c5aff182SThomas Petazzoni DMA_TO_DEVICE); 2968c5aff182SThomas Petazzoni mvneta_txq_desc_put(txq); 2969c5aff182SThomas Petazzoni frags = 0; 2970c5aff182SThomas Petazzoni goto out; 2971c5aff182SThomas Petazzoni } 2972c5aff182SThomas Petazzoni } 2973c5aff182SThomas Petazzoni 2974e19d2ddaSEzequiel Garcia out: 2975e19d2ddaSEzequiel Garcia if (frags > 0) { 2976e19d2ddaSEzequiel Garcia struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 297769de66fcSLorenzo Bianconi struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2978e19d2ddaSEzequiel Garcia 2979a29b6235SMarcin Wojtas netdev_tx_sent_queue(nq, len); 2980a29b6235SMarcin Wojtas 2981c5aff182SThomas Petazzoni txq->count += frags; 29828eef5f97SEzequiel Garcia if (txq->count >= txq->tx_stop_threshold) 2983c5aff182SThomas Petazzoni netif_tx_stop_queue(nq); 2984c5aff182SThomas Petazzoni 29856b16f9eeSFlorian Westphal if (!netdev_xmit_more() || netif_xmit_stopped(nq) || 29862a90f7e1SSimon Guinot txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) 29872a90f7e1SSimon Guinot mvneta_txq_pend_desc_add(pp, txq, frags); 29882a90f7e1SSimon Guinot else 29892a90f7e1SSimon Guinot txq->pending += frags; 29902a90f7e1SSimon Guinot 299169de66fcSLorenzo Bianconi u64_stats_update_begin(&stats->syncp); 2992320d5441SLorenzo Bianconi stats->es.ps.tx_bytes += len; 2993320d5441SLorenzo Bianconi stats->es.ps.tx_packets++; 299469de66fcSLorenzo Bianconi u64_stats_update_end(&stats->syncp); 2995c5aff182SThomas Petazzoni } else { 2996c5aff182SThomas Petazzoni dev->stats.tx_dropped++; 2997c5aff182SThomas Petazzoni dev_kfree_skb_any(skb); 2998c5aff182SThomas Petazzoni } 2999c5aff182SThomas Petazzoni 3000c5aff182SThomas Petazzoni return NETDEV_TX_OK; 3001c5aff182SThomas Petazzoni } 3002c5aff182SThomas Petazzoni 3003c5aff182SThomas Petazzoni 3004c5aff182SThomas Petazzoni /* Free tx resources, when resetting a port */ 3005c5aff182SThomas Petazzoni static void mvneta_txq_done_force(struct mvneta_port *pp, 3006c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 3007c5aff182SThomas Petazzoni 3008c5aff182SThomas Petazzoni { 3009a29b6235SMarcin Wojtas struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3010c5aff182SThomas Petazzoni int tx_done = txq->count; 3011c5aff182SThomas Petazzoni 3012632bb64fSLorenzo Bianconi mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); 3013c5aff182SThomas Petazzoni 3014c5aff182SThomas Petazzoni /* reset txq */ 3015c5aff182SThomas Petazzoni txq->count = 0; 3016c5aff182SThomas Petazzoni txq->txq_put_index = 0; 3017c5aff182SThomas Petazzoni txq->txq_get_index = 0; 3018c5aff182SThomas Petazzoni } 3019c5aff182SThomas Petazzoni 30206c498974Swilly tarreau /* Handle tx done - called in softirq context. The <cause_tx_done> argument 30216c498974Swilly tarreau * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 
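 * Each set bit of <cause_tx_done> corresponds to a TX queue id with
 * completed descriptors (e.g. a cause of 0x5 means queues 0 and 2 need
 * servicing); the loop below handles one queue per iteration and clears
 * its bit until the mask is empty.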
30226c498974Swilly tarreau */ 30230713a86aSArnaud Ebalard static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 3024c5aff182SThomas Petazzoni { 3025c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq; 3026c5aff182SThomas Petazzoni struct netdev_queue *nq; 3027bd9f1ee3SJisheng Zhang int cpu = smp_processor_id(); 3028c5aff182SThomas Petazzoni 30296c498974Swilly tarreau while (cause_tx_done) { 3030c5aff182SThomas Petazzoni txq = mvneta_tx_done_policy(pp, cause_tx_done); 3031c5aff182SThomas Petazzoni 3032c5aff182SThomas Petazzoni nq = netdev_get_tx_queue(pp->dev, txq->id); 3033bd9f1ee3SJisheng Zhang __netif_tx_lock(nq, cpu); 3034c5aff182SThomas Petazzoni 30350713a86aSArnaud Ebalard if (txq->count) 30360713a86aSArnaud Ebalard mvneta_txq_done(pp, txq); 3037c5aff182SThomas Petazzoni 3038c5aff182SThomas Petazzoni __netif_tx_unlock(nq); 3039c5aff182SThomas Petazzoni cause_tx_done &= ~((1 << txq->id)); 3040c5aff182SThomas Petazzoni } 3041c5aff182SThomas Petazzoni } 3042c5aff182SThomas Petazzoni 30436a20c175SThomas Petazzoni /* Compute crc8 of the specified address, using a unique algorithm , 3044c5aff182SThomas Petazzoni * according to hw spec, different than generic crc8 algorithm 3045c5aff182SThomas Petazzoni */ 3046c5aff182SThomas Petazzoni static int mvneta_addr_crc(unsigned char *addr) 3047c5aff182SThomas Petazzoni { 3048c5aff182SThomas Petazzoni int crc = 0; 3049c5aff182SThomas Petazzoni int i; 3050c5aff182SThomas Petazzoni 3051c5aff182SThomas Petazzoni for (i = 0; i < ETH_ALEN; i++) { 3052c5aff182SThomas Petazzoni int j; 3053c5aff182SThomas Petazzoni 3054c5aff182SThomas Petazzoni crc = (crc ^ addr[i]) << 8; 3055c5aff182SThomas Petazzoni for (j = 7; j >= 0; j--) { 3056c5aff182SThomas Petazzoni if (crc & (0x100 << j)) 3057c5aff182SThomas Petazzoni crc ^= 0x107 << j; 3058c5aff182SThomas Petazzoni } 3059c5aff182SThomas Petazzoni } 3060c5aff182SThomas Petazzoni 3061c5aff182SThomas Petazzoni return crc; 3062c5aff182SThomas Petazzoni } 3063c5aff182SThomas Petazzoni 3064c5aff182SThomas Petazzoni /* This method controls the net device special MAC multicast support. 3065c5aff182SThomas Petazzoni * The Special Multicast Table for MAC addresses supports MAC of the form 3066c5aff182SThomas Petazzoni * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 3067c5aff182SThomas Petazzoni * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3068c5aff182SThomas Petazzoni * Table entries in the DA-Filter table. This method set the Special 3069c5aff182SThomas Petazzoni * Multicast Table appropriate entry. 
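 * Worked example: for DA 01:00:5e:00:00:2a the last byte is 0x2a (42), so
 * the entry lives in the SMC register at byte offset (42 / 4) * 4 = 40 from
 * MVNETA_DA_FILT_SPEC_MCAST, in byte lane 42 % 4 = 2 of that register.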
3070c5aff182SThomas Petazzoni */ 3071c5aff182SThomas Petazzoni static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 3072c5aff182SThomas Petazzoni unsigned char last_byte, 3073c5aff182SThomas Petazzoni int queue) 3074c5aff182SThomas Petazzoni { 3075c5aff182SThomas Petazzoni unsigned int smc_table_reg; 3076c5aff182SThomas Petazzoni unsigned int tbl_offset; 3077c5aff182SThomas Petazzoni unsigned int reg_offset; 3078c5aff182SThomas Petazzoni 3079c5aff182SThomas Petazzoni /* Register offset from SMC table base */ 3080c5aff182SThomas Petazzoni tbl_offset = (last_byte / 4); 3081c5aff182SThomas Petazzoni /* Entry offset within the above reg */ 3082c5aff182SThomas Petazzoni reg_offset = last_byte % 4; 3083c5aff182SThomas Petazzoni 3084c5aff182SThomas Petazzoni smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 3085c5aff182SThomas Petazzoni + tbl_offset * 4)); 3086c5aff182SThomas Petazzoni 3087c5aff182SThomas Petazzoni if (queue == -1) 3088c5aff182SThomas Petazzoni smc_table_reg &= ~(0xff << (8 * reg_offset)); 3089c5aff182SThomas Petazzoni else { 3090c5aff182SThomas Petazzoni smc_table_reg &= ~(0xff << (8 * reg_offset)); 3091c5aff182SThomas Petazzoni smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 3092c5aff182SThomas Petazzoni } 3093c5aff182SThomas Petazzoni 3094c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 3095c5aff182SThomas Petazzoni smc_table_reg); 3096c5aff182SThomas Petazzoni } 3097c5aff182SThomas Petazzoni 3098c5aff182SThomas Petazzoni /* This method controls the network device Other MAC multicast support. 3099c5aff182SThomas Petazzoni * The Other Multicast Table is used for multicast of another type. 3100c5aff182SThomas Petazzoni * A CRC-8 is used as an index to the Other Multicast Table entries 3101c5aff182SThomas Petazzoni * in the DA-Filter table. 3102c5aff182SThomas Petazzoni * The method gets the CRC-8 value from the calling routine and 3103c5aff182SThomas Petazzoni * sets the Other Multicast Table appropriate entry according to the 3104c5aff182SThomas Petazzoni * specified CRC-8 . 
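 * Worked example: a CRC-8 of 0x53 (83) selects the register at byte offset
 * (83 / 4) * 4 = 80 from MVNETA_DA_FILT_OTH_MCAST, byte lane 83 % 4 = 3 of
 * that register.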
3105c5aff182SThomas Petazzoni */ 3106c5aff182SThomas Petazzoni static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 3107c5aff182SThomas Petazzoni unsigned char crc8, 3108c5aff182SThomas Petazzoni int queue) 3109c5aff182SThomas Petazzoni { 3110c5aff182SThomas Petazzoni unsigned int omc_table_reg; 3111c5aff182SThomas Petazzoni unsigned int tbl_offset; 3112c5aff182SThomas Petazzoni unsigned int reg_offset; 3113c5aff182SThomas Petazzoni 3114c5aff182SThomas Petazzoni tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 3115c5aff182SThomas Petazzoni reg_offset = crc8 % 4; /* Entry offset within the above reg */ 3116c5aff182SThomas Petazzoni 3117c5aff182SThomas Petazzoni omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 3118c5aff182SThomas Petazzoni 3119c5aff182SThomas Petazzoni if (queue == -1) { 3120c5aff182SThomas Petazzoni /* Clear accepts frame bit at specified Other DA table entry */ 3121c5aff182SThomas Petazzoni omc_table_reg &= ~(0xff << (8 * reg_offset)); 3122c5aff182SThomas Petazzoni } else { 3123c5aff182SThomas Petazzoni omc_table_reg &= ~(0xff << (8 * reg_offset)); 3124c5aff182SThomas Petazzoni omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 3125c5aff182SThomas Petazzoni } 3126c5aff182SThomas Petazzoni 3127c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 3128c5aff182SThomas Petazzoni } 3129c5aff182SThomas Petazzoni 3130c5aff182SThomas Petazzoni /* The network device supports multicast using two tables: 3131c5aff182SThomas Petazzoni * 1) Special Multicast Table for MAC addresses of the form 3132c5aff182SThomas Petazzoni * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 3133c5aff182SThomas Petazzoni * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3134c5aff182SThomas Petazzoni * Table entries in the DA-Filter table. 3135c5aff182SThomas Petazzoni * 2) Other Multicast Table for multicast of another type. A CRC-8 value 3136c5aff182SThomas Petazzoni * is used as an index to the Other Multicast Table entries in the 3137c5aff182SThomas Petazzoni * DA-Filter table. 
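 * mvneta_mcast_addr_set() below picks the table: an address matching the
 * 01:00:5e:00:00:xx prefix goes straight to the special table, anything
 * else is hashed with mvneta_addr_crc() and lands in the other-multicast
 * table, with a per-CRC reference count so an entry is only cleared once
 * the last address mapping to it has been removed.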
3138c5aff182SThomas Petazzoni */ 3139c5aff182SThomas Petazzoni static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, 3140c5aff182SThomas Petazzoni int queue) 3141c5aff182SThomas Petazzoni { 3142c5aff182SThomas Petazzoni unsigned char crc_result = 0; 3143c5aff182SThomas Petazzoni 3144c5aff182SThomas Petazzoni if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { 3145c5aff182SThomas Petazzoni mvneta_set_special_mcast_addr(pp, p_addr[5], queue); 3146c5aff182SThomas Petazzoni return 0; 3147c5aff182SThomas Petazzoni } 3148c5aff182SThomas Petazzoni 3149c5aff182SThomas Petazzoni crc_result = mvneta_addr_crc(p_addr); 3150c5aff182SThomas Petazzoni if (queue == -1) { 3151c5aff182SThomas Petazzoni if (pp->mcast_count[crc_result] == 0) { 3152c5aff182SThomas Petazzoni netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", 3153c5aff182SThomas Petazzoni crc_result); 3154c5aff182SThomas Petazzoni return -EINVAL; 3155c5aff182SThomas Petazzoni } 3156c5aff182SThomas Petazzoni 3157c5aff182SThomas Petazzoni pp->mcast_count[crc_result]--; 3158c5aff182SThomas Petazzoni if (pp->mcast_count[crc_result] != 0) { 3159c5aff182SThomas Petazzoni netdev_info(pp->dev, 3160c5aff182SThomas Petazzoni "After delete there are %d valid Mcast for crc8=0x%02x\n", 3161c5aff182SThomas Petazzoni pp->mcast_count[crc_result], crc_result); 3162c5aff182SThomas Petazzoni return -EINVAL; 3163c5aff182SThomas Petazzoni } 3164c5aff182SThomas Petazzoni } else 3165c5aff182SThomas Petazzoni pp->mcast_count[crc_result]++; 3166c5aff182SThomas Petazzoni 3167c5aff182SThomas Petazzoni mvneta_set_other_mcast_addr(pp, crc_result, queue); 3168c5aff182SThomas Petazzoni 3169c5aff182SThomas Petazzoni return 0; 3170c5aff182SThomas Petazzoni } 3171c5aff182SThomas Petazzoni 3172c5aff182SThomas Petazzoni /* Configure Filtering mode of Ethernet port */ 3173c5aff182SThomas Petazzoni static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, 3174c5aff182SThomas Petazzoni int is_promisc) 3175c5aff182SThomas Petazzoni { 3176c5aff182SThomas Petazzoni u32 port_cfg_reg, val; 3177c5aff182SThomas Petazzoni 3178c5aff182SThomas Petazzoni port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); 3179c5aff182SThomas Petazzoni 3180c5aff182SThomas Petazzoni val = mvreg_read(pp, MVNETA_TYPE_PRIO); 3181c5aff182SThomas Petazzoni 3182c5aff182SThomas Petazzoni /* Set / Clear UPM bit in port configuration register */ 3183c5aff182SThomas Petazzoni if (is_promisc) { 3184c5aff182SThomas Petazzoni /* Accept all Unicast addresses */ 3185c5aff182SThomas Petazzoni port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; 3186c5aff182SThomas Petazzoni val |= MVNETA_FORCE_UNI; 3187c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); 3188c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); 3189c5aff182SThomas Petazzoni } else { 3190c5aff182SThomas Petazzoni /* Reject all Unicast addresses */ 3191c5aff182SThomas Petazzoni port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; 3192c5aff182SThomas Petazzoni val &= ~MVNETA_FORCE_UNI; 3193c5aff182SThomas Petazzoni } 3194c5aff182SThomas Petazzoni 3195c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); 3196c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TYPE_PRIO, val); 3197c5aff182SThomas Petazzoni } 3198c5aff182SThomas Petazzoni 3199c5aff182SThomas Petazzoni /* register unicast and multicast addresses */ 3200c5aff182SThomas Petazzoni static void mvneta_set_rx_mode(struct net_device *dev) 3201c5aff182SThomas Petazzoni { 3202c5aff182SThomas Petazzoni struct mvneta_port *pp =
netdev_priv(dev); 3203c5aff182SThomas Petazzoni struct netdev_hw_addr *ha; 3204c5aff182SThomas Petazzoni 3205c5aff182SThomas Petazzoni if (dev->flags & IFF_PROMISC) { 3206c5aff182SThomas Petazzoni /* Accept all: Multicast + Unicast */ 3207c5aff182SThomas Petazzoni mvneta_rx_unicast_promisc_set(pp, 1); 320890b74c01SGregory CLEMENT mvneta_set_ucast_table(pp, pp->rxq_def); 320990b74c01SGregory CLEMENT mvneta_set_special_mcast_table(pp, pp->rxq_def); 321090b74c01SGregory CLEMENT mvneta_set_other_mcast_table(pp, pp->rxq_def); 3211c5aff182SThomas Petazzoni } else { 3212c5aff182SThomas Petazzoni /* Accept single Unicast */ 3213c5aff182SThomas Petazzoni mvneta_rx_unicast_promisc_set(pp, 0); 3214c5aff182SThomas Petazzoni mvneta_set_ucast_table(pp, -1); 321590b74c01SGregory CLEMENT mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); 3216c5aff182SThomas Petazzoni 3217c5aff182SThomas Petazzoni if (dev->flags & IFF_ALLMULTI) { 3218c5aff182SThomas Petazzoni /* Accept all multicast */ 321990b74c01SGregory CLEMENT mvneta_set_special_mcast_table(pp, pp->rxq_def); 322090b74c01SGregory CLEMENT mvneta_set_other_mcast_table(pp, pp->rxq_def); 3221c5aff182SThomas Petazzoni } else { 3222c5aff182SThomas Petazzoni /* Accept only initialized multicast */ 3223c5aff182SThomas Petazzoni mvneta_set_special_mcast_table(pp, -1); 3224c5aff182SThomas Petazzoni mvneta_set_other_mcast_table(pp, -1); 3225c5aff182SThomas Petazzoni 3226c5aff182SThomas Petazzoni if (!netdev_mc_empty(dev)) { 3227c5aff182SThomas Petazzoni netdev_for_each_mc_addr(ha, dev) { 3228c5aff182SThomas Petazzoni mvneta_mcast_addr_set(pp, ha->addr, 322990b74c01SGregory CLEMENT pp->rxq_def); 3230c5aff182SThomas Petazzoni } 3231c5aff182SThomas Petazzoni } 3232c5aff182SThomas Petazzoni } 3233c5aff182SThomas Petazzoni } 3234c5aff182SThomas Petazzoni } 3235c5aff182SThomas Petazzoni 3236c5aff182SThomas Petazzoni /* Interrupt handling - the callback for request_irq() */ 3237c5aff182SThomas Petazzoni static irqreturn_t mvneta_isr(int irq, void *dev_id) 3238c5aff182SThomas Petazzoni { 32392636ac3cSMarcin Wojtas struct mvneta_port *pp = (struct mvneta_port *)dev_id; 32402636ac3cSMarcin Wojtas 32412636ac3cSMarcin Wojtas mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 32422636ac3cSMarcin Wojtas napi_schedule(&pp->napi); 32432636ac3cSMarcin Wojtas 32442636ac3cSMarcin Wojtas return IRQ_HANDLED; 32452636ac3cSMarcin Wojtas } 32462636ac3cSMarcin Wojtas 32472636ac3cSMarcin Wojtas /* Interrupt handling - the callback for request_percpu_irq() */ 32482636ac3cSMarcin Wojtas static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) 32492636ac3cSMarcin Wojtas { 325012bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; 3251c5aff182SThomas Petazzoni 325212bb03b4SMaxime Ripard disable_percpu_irq(port->pp->dev->irq); 325312bb03b4SMaxime Ripard napi_schedule(&port->napi); 3254c5aff182SThomas Petazzoni 3255c5aff182SThomas Petazzoni return IRQ_HANDLED; 3256c5aff182SThomas Petazzoni } 3257c5aff182SThomas Petazzoni 3258503f9aa9SRussell King static void mvneta_link_change(struct mvneta_port *pp) 3259898b2970SStas Sergeev { 3260898b2970SStas Sergeev u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3261898b2970SStas Sergeev 3262503f9aa9SRussell King phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); 3263898b2970SStas Sergeev } 3264898b2970SStas Sergeev 3265c5aff182SThomas Petazzoni /* NAPI handler 3266c5aff182SThomas Petazzoni * Bits 0 - 7 of the causeRxTx register indicate transmitted 3267c5aff182SThomas Petazzoni * packets on the
corresponding TXQ (Bit 0 is for TX queue 0). 3268c5aff182SThomas Petazzoni * Bits 8 - 15 of the causeRxTx register indicate received 3269c5aff182SThomas Petazzoni * packets on the corresponding RXQ (Bit 8 is for RX queue 0). 3270c5aff182SThomas Petazzoni * Each CPU has its own causeRxTx register 3271c5aff182SThomas Petazzoni */ 3272c5aff182SThomas Petazzoni static int mvneta_poll(struct napi_struct *napi, int budget) 3273c5aff182SThomas Petazzoni { 3274c5aff182SThomas Petazzoni int rx_done = 0; 3275c5aff182SThomas Petazzoni u32 cause_rx_tx; 32762dcf75e2SGregory CLEMENT int rx_queue; 3277c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(napi->dev); 327812bb03b4SMaxime Ripard struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 3279c5aff182SThomas Petazzoni 3280c5aff182SThomas Petazzoni if (!netif_running(pp->dev)) { 32812636ac3cSMarcin Wojtas napi_complete(napi); 3282c5aff182SThomas Petazzoni return rx_done; 3283c5aff182SThomas Petazzoni } 3284c5aff182SThomas Petazzoni 3285c5aff182SThomas Petazzoni /* Read cause register */ 3286898b2970SStas Sergeev cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 3287898b2970SStas Sergeev if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 3288898b2970SStas Sergeev u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 3289898b2970SStas Sergeev 3290898b2970SStas Sergeev mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 3291503f9aa9SRussell King 3292503f9aa9SRussell King if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | 3293856b2cc5SRussell King MVNETA_CAUSE_LINK_CHANGE)) 3294503f9aa9SRussell King mvneta_link_change(pp); 3295898b2970SStas Sergeev } 329671f6d1b3Swilly tarreau 329771f6d1b3Swilly tarreau /* Release Tx descriptors */ 329871f6d1b3Swilly tarreau if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 32990713a86aSArnaud Ebalard mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 330071f6d1b3Swilly tarreau cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 330171f6d1b3Swilly tarreau } 3302c5aff182SThomas Petazzoni 33036a20c175SThomas Petazzoni /* For the case where the last mvneta_poll did not process all 3304c5aff182SThomas Petazzoni * RX packets 3305c5aff182SThomas Petazzoni */ 33062636ac3cSMarcin Wojtas cause_rx_tx |= pp->neta_armada3700 ?
pp->cause_rx_tx : 33072636ac3cSMarcin Wojtas port->cause_rx_tx; 33082dcf75e2SGregory CLEMENT 3309065fd83eSJisheng Zhang rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 33102dcf75e2SGregory CLEMENT if (rx_queue) { 33112dcf75e2SGregory CLEMENT rx_queue = rx_queue - 1; 3312dc35a10fSMarcin Wojtas if (pp->bm_priv) 33137a86f05fSAndrew Lunn rx_done = mvneta_rx_hwbm(napi, pp, budget, 33147a86f05fSAndrew Lunn &pp->rxqs[rx_queue]); 3315dc35a10fSMarcin Wojtas else 33167a86f05fSAndrew Lunn rx_done = mvneta_rx_swbm(napi, pp, budget, 33177a86f05fSAndrew Lunn &pp->rxqs[rx_queue]); 33182dcf75e2SGregory CLEMENT } 33192dcf75e2SGregory CLEMENT 33206ad20165SEric Dumazet if (rx_done < budget) { 3321c5aff182SThomas Petazzoni cause_rx_tx = 0; 33226ad20165SEric Dumazet napi_complete_done(napi, rx_done); 33232636ac3cSMarcin Wojtas 33242636ac3cSMarcin Wojtas if (pp->neta_armada3700) { 33252636ac3cSMarcin Wojtas unsigned long flags; 33262636ac3cSMarcin Wojtas 33272636ac3cSMarcin Wojtas local_irq_save(flags); 33282636ac3cSMarcin Wojtas mvreg_write(pp, MVNETA_INTR_NEW_MASK, 33292636ac3cSMarcin Wojtas MVNETA_RX_INTR_MASK(rxq_number) | 33302636ac3cSMarcin Wojtas MVNETA_TX_INTR_MASK(txq_number) | 33312636ac3cSMarcin Wojtas MVNETA_MISCINTR_INTR_MASK); 33322636ac3cSMarcin Wojtas local_irq_restore(flags); 33332636ac3cSMarcin Wojtas } else { 333412bb03b4SMaxime Ripard enable_percpu_irq(pp->dev->irq, 0); 3335c5aff182SThomas Petazzoni } 33362636ac3cSMarcin Wojtas } 3337c5aff182SThomas Petazzoni 33382636ac3cSMarcin Wojtas if (pp->neta_armada3700) 33392636ac3cSMarcin Wojtas pp->cause_rx_tx = cause_rx_tx; 33402636ac3cSMarcin Wojtas else 334112bb03b4SMaxime Ripard port->cause_rx_tx = cause_rx_tx; 33422636ac3cSMarcin Wojtas 3343c5aff182SThomas Petazzoni return rx_done; 3344c5aff182SThomas Petazzoni } 3345c5aff182SThomas Petazzoni 3346568a3fa2SLorenzo Bianconi static int mvneta_create_page_pool(struct mvneta_port *pp, 3347568a3fa2SLorenzo Bianconi struct mvneta_rx_queue *rxq, int size) 3348568a3fa2SLorenzo Bianconi { 33490db51da7SLorenzo Bianconi struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); 3350568a3fa2SLorenzo Bianconi struct page_pool_params pp_params = { 3351568a3fa2SLorenzo Bianconi .order = 0, 335207e13edbSLorenzo Bianconi .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 3353568a3fa2SLorenzo Bianconi .pool_size = size, 33541657adccSLorenzo Bianconi .nid = NUMA_NO_NODE, 3355568a3fa2SLorenzo Bianconi .dev = pp->dev->dev.parent, 33560db51da7SLorenzo Bianconi .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 335707e13edbSLorenzo Bianconi .offset = pp->rx_offset_correction, 335807e13edbSLorenzo Bianconi .max_len = MVNETA_MAX_RX_BUF_SIZE, 3359568a3fa2SLorenzo Bianconi }; 3360568a3fa2SLorenzo Bianconi int err; 3361568a3fa2SLorenzo Bianconi 3362568a3fa2SLorenzo Bianconi rxq->page_pool = page_pool_create(&pp_params); 3363568a3fa2SLorenzo Bianconi if (IS_ERR(rxq->page_pool)) { 3364568a3fa2SLorenzo Bianconi err = PTR_ERR(rxq->page_pool); 3365568a3fa2SLorenzo Bianconi rxq->page_pool = NULL; 3366568a3fa2SLorenzo Bianconi return err; 3367568a3fa2SLorenzo Bianconi } 3368568a3fa2SLorenzo Bianconi 3369bf25146aSEelco Chaudron err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, 3370bf25146aSEelco Chaudron PAGE_SIZE); 3371568a3fa2SLorenzo Bianconi if (err < 0) 3372568a3fa2SLorenzo Bianconi goto err_free_pp; 3373568a3fa2SLorenzo Bianconi 3374568a3fa2SLorenzo Bianconi err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 3375568a3fa2SLorenzo Bianconi rxq->page_pool); 3376568a3fa2SLorenzo Bianconi if (err) 3377568a3fa2SLorenzo Bianconi goto err_unregister_rxq; 3378568a3fa2SLorenzo Bianconi 3379568a3fa2SLorenzo Bianconi return 0; 3380568a3fa2SLorenzo Bianconi 3381568a3fa2SLorenzo Bianconi err_unregister_rxq: 3382568a3fa2SLorenzo Bianconi xdp_rxq_info_unreg(&rxq->xdp_rxq); 3383568a3fa2SLorenzo Bianconi err_free_pp: 3384568a3fa2SLorenzo Bianconi page_pool_destroy(rxq->page_pool); 3385568a3fa2SLorenzo Bianconi rxq->page_pool = NULL; 3386568a3fa2SLorenzo Bianconi return err; 3387568a3fa2SLorenzo Bianconi } 3388568a3fa2SLorenzo Bianconi 3389c5aff182SThomas Petazzoni /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 3390c5aff182SThomas Petazzoni static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 3391c5aff182SThomas Petazzoni int num) 3392c5aff182SThomas Petazzoni { 3393568a3fa2SLorenzo Bianconi int i, err; 3394568a3fa2SLorenzo Bianconi 3395568a3fa2SLorenzo Bianconi err = mvneta_create_page_pool(pp, rxq, num); 3396568a3fa2SLorenzo Bianconi if (err < 0) 3397568a3fa2SLorenzo Bianconi return err; 3398c5aff182SThomas Petazzoni 3399c5aff182SThomas Petazzoni for (i = 0; i < num; i++) { 3400a1a65ab1Swilly tarreau memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 34017e47fd84SGregory CLEMENT if (mvneta_rx_refill(pp, rxq->descs + i, rxq, 34027e47fd84SGregory CLEMENT GFP_KERNEL) != 0) { 34037e47fd84SGregory CLEMENT netdev_err(pp->dev, 34047e47fd84SGregory CLEMENT "%s:rxq %d, %d of %d buffs filled\n", 3405c5aff182SThomas Petazzoni __func__, rxq->id, i, num); 3406c5aff182SThomas Petazzoni break; 3407c5aff182SThomas Petazzoni } 3408c5aff182SThomas Petazzoni } 3409c5aff182SThomas Petazzoni 3410c5aff182SThomas Petazzoni /* Add this number of RX descriptors as non occupied (ready to 34116a20c175SThomas Petazzoni * get packets) 34126a20c175SThomas Petazzoni */ 3413c5aff182SThomas Petazzoni mvneta_rxq_non_occup_desc_add(pp, rxq, i); 3414c5aff182SThomas Petazzoni 3415c5aff182SThomas Petazzoni return i; 3416c5aff182SThomas Petazzoni } 3417c5aff182SThomas Petazzoni 3418c5aff182SThomas Petazzoni /* Free all packets pending transmit from all TXQs and reset TX port */ 3419c5aff182SThomas Petazzoni static void mvneta_tx_reset(struct mvneta_port *pp) 3420c5aff182SThomas Petazzoni { 3421c5aff182SThomas Petazzoni int queue; 3422c5aff182SThomas Petazzoni 34239672850bSEzequiel Garcia /* free the skb's in the tx ring */ 3424c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) 3425c5aff182SThomas Petazzoni 
mvneta_txq_done_force(pp, &pp->txqs[queue]); 3426c5aff182SThomas Petazzoni 3427c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 3428c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 3429c5aff182SThomas Petazzoni } 3430c5aff182SThomas Petazzoni 3431c5aff182SThomas Petazzoni static void mvneta_rx_reset(struct mvneta_port *pp) 3432c5aff182SThomas Petazzoni { 3433c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 3434c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 3435c5aff182SThomas Petazzoni } 3436c5aff182SThomas Petazzoni 3437c5aff182SThomas Petazzoni /* Rx/Tx queue initialization/cleanup methods */ 3438c5aff182SThomas Petazzoni 34394a188a63SJisheng Zhang static int mvneta_rxq_sw_init(struct mvneta_port *pp, 3440c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 3441c5aff182SThomas Petazzoni { 3442c5aff182SThomas Petazzoni rxq->size = pp->rx_ring_size; 3443c5aff182SThomas Petazzoni 3444c5aff182SThomas Petazzoni /* Allocate memory for RX descriptors */ 3445c5aff182SThomas Petazzoni rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3446c5aff182SThomas Petazzoni rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3447c5aff182SThomas Petazzoni &rxq->descs_phys, GFP_KERNEL); 3448f95936ccSMarkus Elfring if (!rxq->descs) 3449c5aff182SThomas Petazzoni return -ENOMEM; 3450c5aff182SThomas Petazzoni 3451c5aff182SThomas Petazzoni rxq->last_desc = rxq->size - 1; 3452c5aff182SThomas Petazzoni 34534a188a63SJisheng Zhang return 0; 34544a188a63SJisheng Zhang } 34554a188a63SJisheng Zhang 34564a188a63SJisheng Zhang static void mvneta_rxq_hw_init(struct mvneta_port *pp, 34574a188a63SJisheng Zhang struct mvneta_rx_queue *rxq) 34584a188a63SJisheng Zhang { 3459c5aff182SThomas Petazzoni /* Set Rx descriptors queue starting address */ 3460c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 3461c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 3462c5aff182SThomas Petazzoni 3463c5aff182SThomas Petazzoni /* Set coalescing pkts and time */ 3464c5aff182SThomas Petazzoni mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 3465c5aff182SThomas Petazzoni mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 3466c5aff182SThomas Petazzoni 3467dc35a10fSMarcin Wojtas if (!pp->bm_priv) { 3468562e2f46SYelena Krivosheev /* Set Offset */ 3469562e2f46SYelena Krivosheev mvneta_rxq_offset_set(pp, rxq, 0); 3470e735fd55SMarcin Wojtas mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
34718dc9a088SLorenzo Bianconi MVNETA_MAX_RX_BUF_SIZE : 3472e735fd55SMarcin Wojtas MVNETA_RX_BUF_SIZE(pp->pkt_size)); 3473c5aff182SThomas Petazzoni mvneta_rxq_bm_disable(pp, rxq); 3474e9f64999SGregory CLEMENT mvneta_rxq_fill(pp, rxq, rxq->size); 3475dc35a10fSMarcin Wojtas } else { 3476562e2f46SYelena Krivosheev /* Set Offset */ 3477562e2f46SYelena Krivosheev mvneta_rxq_offset_set(pp, rxq, 3478562e2f46SYelena Krivosheev NET_SKB_PAD - pp->rx_offset_correction); 3479562e2f46SYelena Krivosheev 3480dc35a10fSMarcin Wojtas mvneta_rxq_bm_enable(pp, rxq); 3481562e2f46SYelena Krivosheev /* Fill RXQ with buffers from RX pool */ 3482dc35a10fSMarcin Wojtas mvneta_rxq_long_pool_set(pp, rxq); 3483dc35a10fSMarcin Wojtas mvneta_rxq_short_pool_set(pp, rxq); 3484e9f64999SGregory CLEMENT mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); 3485dc35a10fSMarcin Wojtas } 34864a188a63SJisheng Zhang } 34874a188a63SJisheng Zhang 34884a188a63SJisheng Zhang /* Create a specified RX queue */ 34894a188a63SJisheng Zhang static int mvneta_rxq_init(struct mvneta_port *pp, 34904a188a63SJisheng Zhang struct mvneta_rx_queue *rxq) 34914a188a63SJisheng Zhang 34924a188a63SJisheng Zhang { 34934a188a63SJisheng Zhang int ret; 34944a188a63SJisheng Zhang 34954a188a63SJisheng Zhang ret = mvneta_rxq_sw_init(pp, rxq); 34964a188a63SJisheng Zhang if (ret < 0) 34974a188a63SJisheng Zhang return ret; 34984a188a63SJisheng Zhang 34994a188a63SJisheng Zhang mvneta_rxq_hw_init(pp, rxq); 3500dc35a10fSMarcin Wojtas 3501c5aff182SThomas Petazzoni return 0; 3502c5aff182SThomas Petazzoni } 3503c5aff182SThomas Petazzoni 3504c5aff182SThomas Petazzoni /* Cleanup Rx queue */ 3505c5aff182SThomas Petazzoni static void mvneta_rxq_deinit(struct mvneta_port *pp, 3506c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq) 3507c5aff182SThomas Petazzoni { 3508c5aff182SThomas Petazzoni mvneta_rxq_drop_pkts(pp, rxq); 3509c5aff182SThomas Petazzoni 3510c5aff182SThomas Petazzoni if (rxq->descs) 3511c5aff182SThomas Petazzoni dma_free_coherent(pp->dev->dev.parent, 3512c5aff182SThomas Petazzoni rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3513c5aff182SThomas Petazzoni rxq->descs, 3514c5aff182SThomas Petazzoni rxq->descs_phys); 3515c5aff182SThomas Petazzoni 3516c5aff182SThomas Petazzoni rxq->descs = NULL; 3517c5aff182SThomas Petazzoni rxq->last_desc = 0; 3518c5aff182SThomas Petazzoni rxq->next_desc_to_proc = 0; 3519c5aff182SThomas Petazzoni rxq->descs_phys = 0; 3520562e2f46SYelena Krivosheev rxq->first_to_refill = 0; 3521562e2f46SYelena Krivosheev rxq->refill_num = 0; 3522c5aff182SThomas Petazzoni } 3523c5aff182SThomas Petazzoni 35244a188a63SJisheng Zhang static int mvneta_txq_sw_init(struct mvneta_port *pp, 3525c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 3526c5aff182SThomas Petazzoni { 352733f4cefbSRussell King (Oracle) int cpu, err; 352850bf8cb6SGregory CLEMENT 3529c5aff182SThomas Petazzoni txq->size = pp->tx_ring_size; 3530c5aff182SThomas Petazzoni 35318eef5f97SEzequiel Garcia /* A queue must always have room for at least one skb. 35328eef5f97SEzequiel Garcia * Therefore, stop the queue when the number of free entries reaches 35338eef5f97SEzequiel Garcia * the maximum number of descriptors per skb.
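 * The wake threshold below is set to half of the stop threshold, so a stopped queue is only woken again once a good number of descriptors has been freed, which helps avoid toggling the queue state on every completion.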
35348eef5f97SEzequiel Garcia */ 35358eef5f97SEzequiel Garcia txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 35368eef5f97SEzequiel Garcia txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 35378eef5f97SEzequiel Garcia 3538c5aff182SThomas Petazzoni /* Allocate memory for TX descriptors */ 3539c5aff182SThomas Petazzoni txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3540c5aff182SThomas Petazzoni txq->size * MVNETA_DESC_ALIGNED_SIZE, 3541c5aff182SThomas Petazzoni &txq->descs_phys, GFP_KERNEL); 3542f95936ccSMarkus Elfring if (!txq->descs) 3543c5aff182SThomas Petazzoni return -ENOMEM; 3544c5aff182SThomas Petazzoni 3545c5aff182SThomas Petazzoni txq->last_desc = txq->size - 1; 3546c5aff182SThomas Petazzoni 35479e58c8b4SLorenzo Bianconi txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); 3548f4544e53STom Rix if (!txq->buf) 3549c5aff182SThomas Petazzoni return -ENOMEM; 35502adb719dSEzequiel Garcia 35512adb719dSEzequiel Garcia /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 355233f4cefbSRussell King (Oracle) err = mvneta_alloc_tso_hdrs(pp, txq); 355333f4cefbSRussell King (Oracle) if (err) 355433f4cefbSRussell King (Oracle) return err; 3555c5aff182SThomas Petazzoni 355650bf8cb6SGregory CLEMENT /* Setup XPS mapping */ 3557cf9bf871SMaxime Chevallier if (pp->neta_armada3700) 3558cf9bf871SMaxime Chevallier cpu = 0; 3559cf9bf871SMaxime Chevallier else if (txq_number > 1) 356050bf8cb6SGregory CLEMENT cpu = txq->id % num_present_cpus(); 356150bf8cb6SGregory CLEMENT else 356250bf8cb6SGregory CLEMENT cpu = pp->rxq_def % num_present_cpus(); 356350bf8cb6SGregory CLEMENT cpumask_set_cpu(cpu, &txq->affinity_mask); 356450bf8cb6SGregory CLEMENT netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 356550bf8cb6SGregory CLEMENT 3566c5aff182SThomas Petazzoni return 0; 3567c5aff182SThomas Petazzoni } 3568c5aff182SThomas Petazzoni 35694a188a63SJisheng Zhang static void mvneta_txq_hw_init(struct mvneta_port *pp, 35704a188a63SJisheng Zhang struct mvneta_tx_queue *txq) 35714a188a63SJisheng Zhang { 35724a188a63SJisheng Zhang /* Set maximum bandwidth for enabled TXQs */ 35734a188a63SJisheng Zhang mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 35744a188a63SJisheng Zhang mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 35754a188a63SJisheng Zhang 35764a188a63SJisheng Zhang /* Set Tx descriptors queue starting address */ 35774a188a63SJisheng Zhang mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 35784a188a63SJisheng Zhang mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 35794a188a63SJisheng Zhang 35804a188a63SJisheng Zhang mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 35814a188a63SJisheng Zhang } 35824a188a63SJisheng Zhang 35834a188a63SJisheng Zhang /* Create and initialize a tx queue */ 35844a188a63SJisheng Zhang static int mvneta_txq_init(struct mvneta_port *pp, 35854a188a63SJisheng Zhang struct mvneta_tx_queue *txq) 35864a188a63SJisheng Zhang { 35874a188a63SJisheng Zhang int ret; 35884a188a63SJisheng Zhang 35894a188a63SJisheng Zhang ret = mvneta_txq_sw_init(pp, txq); 35904a188a63SJisheng Zhang if (ret < 0) 35914a188a63SJisheng Zhang return ret; 35924a188a63SJisheng Zhang 35934a188a63SJisheng Zhang mvneta_txq_hw_init(pp, txq); 35944a188a63SJisheng Zhang 35954a188a63SJisheng Zhang return 0; 35964a188a63SJisheng Zhang } 35974a188a63SJisheng Zhang 3598c5aff182SThomas Petazzoni /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 35994a188a63SJisheng Zhang static void 
mvneta_txq_sw_deinit(struct mvneta_port *pp, 3600c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq) 3601c5aff182SThomas Petazzoni { 3602a29b6235SMarcin Wojtas struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3603a29b6235SMarcin Wojtas 36049e58c8b4SLorenzo Bianconi kfree(txq->buf); 3605c5aff182SThomas Petazzoni 360633f4cefbSRussell King (Oracle) mvneta_free_tso_hdrs(pp, txq); 3607c5aff182SThomas Petazzoni if (txq->descs) 3608c5aff182SThomas Petazzoni dma_free_coherent(pp->dev->dev.parent, 3609c5aff182SThomas Petazzoni txq->size * MVNETA_DESC_ALIGNED_SIZE, 3610c5aff182SThomas Petazzoni txq->descs, txq->descs_phys); 3611c5aff182SThomas Petazzoni 3612a29b6235SMarcin Wojtas netdev_tx_reset_queue(nq); 3613a29b6235SMarcin Wojtas 36142960a2d3SRussell King (Oracle) txq->buf = NULL; 3615c5aff182SThomas Petazzoni txq->descs = NULL; 3616c5aff182SThomas Petazzoni txq->last_desc = 0; 3617c5aff182SThomas Petazzoni txq->next_desc_to_proc = 0; 3618c5aff182SThomas Petazzoni txq->descs_phys = 0; 36194a188a63SJisheng Zhang } 3620c5aff182SThomas Petazzoni 36214a188a63SJisheng Zhang static void mvneta_txq_hw_deinit(struct mvneta_port *pp, 36224a188a63SJisheng Zhang struct mvneta_tx_queue *txq) 36234a188a63SJisheng Zhang { 3624c5aff182SThomas Petazzoni /* Set minimum bandwidth for disabled TXQs */ 3625c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 3626c5aff182SThomas Petazzoni mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 3627c5aff182SThomas Petazzoni 3628c5aff182SThomas Petazzoni /* Set Tx descriptors queue starting address and size */ 3629c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 3630c5aff182SThomas Petazzoni mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 3631c5aff182SThomas Petazzoni } 3632c5aff182SThomas Petazzoni 36334a188a63SJisheng Zhang static void mvneta_txq_deinit(struct mvneta_port *pp, 36344a188a63SJisheng Zhang struct mvneta_tx_queue *txq) 36354a188a63SJisheng Zhang { 36364a188a63SJisheng Zhang mvneta_txq_sw_deinit(pp, txq); 36374a188a63SJisheng Zhang mvneta_txq_hw_deinit(pp, txq); 36384a188a63SJisheng Zhang } 36394a188a63SJisheng Zhang 3640c5aff182SThomas Petazzoni /* Cleanup all Tx queues */ 3641c5aff182SThomas Petazzoni static void mvneta_cleanup_txqs(struct mvneta_port *pp) 3642c5aff182SThomas Petazzoni { 3643c5aff182SThomas Petazzoni int queue; 3644c5aff182SThomas Petazzoni 3645c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) 3646c5aff182SThomas Petazzoni mvneta_txq_deinit(pp, &pp->txqs[queue]); 3647c5aff182SThomas Petazzoni } 3648c5aff182SThomas Petazzoni 3649c5aff182SThomas Petazzoni /* Cleanup all Rx queues */ 3650c5aff182SThomas Petazzoni static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 3651c5aff182SThomas Petazzoni { 36522dcf75e2SGregory CLEMENT int queue; 36532dcf75e2SGregory CLEMENT 3654ca5902a6SYelena Krivosheev for (queue = 0; queue < rxq_number; queue++) 36552dcf75e2SGregory CLEMENT mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3656c5aff182SThomas Petazzoni } 3657c5aff182SThomas Petazzoni 3658c5aff182SThomas Petazzoni 3659c5aff182SThomas Petazzoni /* Init all Rx queues */ 3660c5aff182SThomas Petazzoni static int mvneta_setup_rxqs(struct mvneta_port *pp) 3661c5aff182SThomas Petazzoni { 36622dcf75e2SGregory CLEMENT int queue; 36632dcf75e2SGregory CLEMENT 36642dcf75e2SGregory CLEMENT for (queue = 0; queue < rxq_number; queue++) { 36652dcf75e2SGregory CLEMENT int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 36662dcf75e2SGregory CLEMENT 3667c5aff182SThomas 
Petazzoni if (err) { 3668c5aff182SThomas Petazzoni netdev_err(pp->dev, "%s: can't create rxq=%d\n", 36692dcf75e2SGregory CLEMENT __func__, queue); 3670c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 3671c5aff182SThomas Petazzoni return err; 3672c5aff182SThomas Petazzoni } 36732dcf75e2SGregory CLEMENT } 3674c5aff182SThomas Petazzoni 3675c5aff182SThomas Petazzoni return 0; 3676c5aff182SThomas Petazzoni } 3677c5aff182SThomas Petazzoni 3678c5aff182SThomas Petazzoni /* Init all tx queues */ 3679c5aff182SThomas Petazzoni static int mvneta_setup_txqs(struct mvneta_port *pp) 3680c5aff182SThomas Petazzoni { 3681c5aff182SThomas Petazzoni int queue; 3682c5aff182SThomas Petazzoni 3683c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 3684c5aff182SThomas Petazzoni int err = mvneta_txq_init(pp, &pp->txqs[queue]); 3685c5aff182SThomas Petazzoni if (err) { 3686c5aff182SThomas Petazzoni netdev_err(pp->dev, "%s: can't create txq=%d\n", 3687c5aff182SThomas Petazzoni __func__, queue); 3688c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 3689c5aff182SThomas Petazzoni return err; 3690c5aff182SThomas Petazzoni } 3691c5aff182SThomas Petazzoni } 3692c5aff182SThomas Petazzoni 3693c5aff182SThomas Petazzoni return 0; 3694c5aff182SThomas Petazzoni } 3695c5aff182SThomas Petazzoni 3696b4748553SSascha Hauer static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) 3697031b922bSMarek Behún { 3698031b922bSMarek Behún int ret; 3699031b922bSMarek Behún 3700b4748553SSascha Hauer ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); 3701031b922bSMarek Behún if (ret) 3702031b922bSMarek Behún return ret; 3703031b922bSMarek Behún 3704031b922bSMarek Behún return phy_power_on(pp->comphy); 3705031b922bSMarek Behún } 3706031b922bSMarek Behún 3707b4748553SSascha Hauer static int mvneta_config_interface(struct mvneta_port *pp, 3708b4748553SSascha Hauer phy_interface_t interface) 3709b4748553SSascha Hauer { 3710b4748553SSascha Hauer int ret = 0; 3711b4748553SSascha Hauer 3712b4748553SSascha Hauer if (pp->comphy) { 3713b4748553SSascha Hauer if (interface == PHY_INTERFACE_MODE_SGMII || 3714b4748553SSascha Hauer interface == PHY_INTERFACE_MODE_1000BASEX || 3715b4748553SSascha Hauer interface == PHY_INTERFACE_MODE_2500BASEX) { 3716b4748553SSascha Hauer ret = mvneta_comphy_init(pp, interface); 3717b4748553SSascha Hauer } 3718b4748553SSascha Hauer } else { 3719b4748553SSascha Hauer switch (interface) { 3720b4748553SSascha Hauer case PHY_INTERFACE_MODE_QSGMII: 3721b4748553SSascha Hauer mvreg_write(pp, MVNETA_SERDES_CFG, 3722b4748553SSascha Hauer MVNETA_QSGMII_SERDES_PROTO); 3723b4748553SSascha Hauer break; 3724b4748553SSascha Hauer 3725b4748553SSascha Hauer case PHY_INTERFACE_MODE_SGMII: 3726b4748553SSascha Hauer case PHY_INTERFACE_MODE_1000BASEX: 3727b4748553SSascha Hauer mvreg_write(pp, MVNETA_SERDES_CFG, 3728b4748553SSascha Hauer MVNETA_SGMII_SERDES_PROTO); 3729b4748553SSascha Hauer break; 37301a642ca7SSascha Hauer 37311a642ca7SSascha Hauer case PHY_INTERFACE_MODE_2500BASEX: 37321a642ca7SSascha Hauer mvreg_write(pp, MVNETA_SERDES_CFG, 37331a642ca7SSascha Hauer MVNETA_HSGMII_SERDES_PROTO); 37341a642ca7SSascha Hauer break; 3735b4748553SSascha Hauer default: 3736d3d239dcSSascha Hauer break; 3737b4748553SSascha Hauer } 3738b4748553SSascha Hauer } 3739b4748553SSascha Hauer 3740b4748553SSascha Hauer pp->phy_interface = interface; 3741b4748553SSascha Hauer 3742b4748553SSascha Hauer return ret; 3743b4748553SSascha Hauer } 3744b4748553SSascha Hauer 3745c5aff182SThomas Petazzoni static void 
mvneta_start_dev(struct mvneta_port *pp) 3746c5aff182SThomas Petazzoni { 37476b125d63SGregory CLEMENT int cpu; 374812bb03b4SMaxime Ripard 3749b4748553SSascha Hauer WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); 3750a10c1c81SRussell King 3751c5aff182SThomas Petazzoni mvneta_max_rx_size_set(pp, pp->pkt_size); 3752c5aff182SThomas Petazzoni mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 3753c5aff182SThomas Petazzoni 3754c5aff182SThomas Petazzoni /* start the Rx/Tx activity */ 3755c5aff182SThomas Petazzoni mvneta_port_enable(pp); 3756c5aff182SThomas Petazzoni 37572636ac3cSMarcin Wojtas if (!pp->neta_armada3700) { 3758c5aff182SThomas Petazzoni /* Enable polling on the port */ 3759129219e4SGregory CLEMENT for_each_online_cpu(cpu) { 37602636ac3cSMarcin Wojtas struct mvneta_pcpu_port *port = 37612636ac3cSMarcin Wojtas per_cpu_ptr(pp->ports, cpu); 376212bb03b4SMaxime Ripard 376312bb03b4SMaxime Ripard napi_enable(&port->napi); 376412bb03b4SMaxime Ripard } 37652636ac3cSMarcin Wojtas } else { 37662636ac3cSMarcin Wojtas napi_enable(&pp->napi); 37672636ac3cSMarcin Wojtas } 3768c5aff182SThomas Petazzoni 37692dcf75e2SGregory CLEMENT /* Unmask interrupts. It has to be done from each CPU */ 37706b125d63SGregory CLEMENT on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 37716b125d63SGregory CLEMENT 3772898b2970SStas Sergeev mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3773898b2970SStas Sergeev MVNETA_CAUSE_PHY_STATUS_CHANGE | 3774856b2cc5SRussell King MVNETA_CAUSE_LINK_CHANGE); 3775c5aff182SThomas Petazzoni 3776503f9aa9SRussell King phylink_start(pp->phylink); 377761b5cc20SDaniel González Cabanelas 37785ba2254bSJisheng Zhang /* We may have called phylink_speed_down before */ 377961b5cc20SDaniel González Cabanelas phylink_speed_up(pp->phylink); 378061b5cc20SDaniel González Cabanelas 3781c5aff182SThomas Petazzoni netif_tx_start_all_queues(pp->dev); 378262a502ccSLorenzo Bianconi 378362a502ccSLorenzo Bianconi clear_bit(__MVNETA_DOWN, &pp->state); 3784c5aff182SThomas Petazzoni } 3785c5aff182SThomas Petazzoni 3786c5aff182SThomas Petazzoni static void mvneta_stop_dev(struct mvneta_port *pp) 3787c5aff182SThomas Petazzoni { 378812bb03b4SMaxime Ripard unsigned int cpu; 378912bb03b4SMaxime Ripard 379062a502ccSLorenzo Bianconi set_bit(__MVNETA_DOWN, &pp->state); 379162a502ccSLorenzo Bianconi 379261b5cc20SDaniel González Cabanelas if (device_may_wakeup(&pp->dev->dev)) 379361b5cc20SDaniel González Cabanelas phylink_speed_down(pp->phylink, false); 379461b5cc20SDaniel González Cabanelas 3795503f9aa9SRussell King phylink_stop(pp->phylink); 3796c5aff182SThomas Petazzoni 37972636ac3cSMarcin Wojtas if (!pp->neta_armada3700) { 3798129219e4SGregory CLEMENT for_each_online_cpu(cpu) { 37992636ac3cSMarcin Wojtas struct mvneta_pcpu_port *port = 38002636ac3cSMarcin Wojtas per_cpu_ptr(pp->ports, cpu); 380112bb03b4SMaxime Ripard 380212bb03b4SMaxime Ripard napi_disable(&port->napi); 380312bb03b4SMaxime Ripard } 38042636ac3cSMarcin Wojtas } else { 38052636ac3cSMarcin Wojtas napi_disable(&pp->napi); 38062636ac3cSMarcin Wojtas } 3807c5aff182SThomas Petazzoni 3808c5aff182SThomas Petazzoni netif_carrier_off(pp->dev); 3809c5aff182SThomas Petazzoni 3810c5aff182SThomas Petazzoni mvneta_port_down(pp); 3811c5aff182SThomas Petazzoni netif_tx_stop_all_queues(pp->dev); 3812c5aff182SThomas Petazzoni 3813c5aff182SThomas Petazzoni /* Stop the port activity */ 3814c5aff182SThomas Petazzoni mvneta_port_disable(pp); 3815c5aff182SThomas Petazzoni 3816c5aff182SThomas Petazzoni /* Clear all ethernet port interrupts */ 3817db488c10SGregory CLEMENT 
on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 3818c5aff182SThomas Petazzoni 3819c5aff182SThomas Petazzoni /* Mask all ethernet port interrupts */ 3820db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3821c5aff182SThomas Petazzoni 3822c5aff182SThomas Petazzoni mvneta_tx_reset(pp); 3823c5aff182SThomas Petazzoni mvneta_rx_reset(pp); 3824a10c1c81SRussell King 3825a10c1c81SRussell King WARN_ON(phy_power_off(pp->comphy)); 3826c5aff182SThomas Petazzoni } 3827c5aff182SThomas Petazzoni 3828db5dd0dbSMarcin Wojtas static void mvneta_percpu_enable(void *arg) 3829db5dd0dbSMarcin Wojtas { 3830db5dd0dbSMarcin Wojtas struct mvneta_port *pp = arg; 3831db5dd0dbSMarcin Wojtas 3832db5dd0dbSMarcin Wojtas enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 3833db5dd0dbSMarcin Wojtas } 3834db5dd0dbSMarcin Wojtas 3835db5dd0dbSMarcin Wojtas static void mvneta_percpu_disable(void *arg) 3836db5dd0dbSMarcin Wojtas { 3837db5dd0dbSMarcin Wojtas struct mvneta_port *pp = arg; 3838db5dd0dbSMarcin Wojtas 3839db5dd0dbSMarcin Wojtas disable_percpu_irq(pp->dev->irq); 3840db5dd0dbSMarcin Wojtas } 3841db5dd0dbSMarcin Wojtas 3842c5aff182SThomas Petazzoni /* Change the device mtu */ 3843c5aff182SThomas Petazzoni static int mvneta_change_mtu(struct net_device *dev, int mtu) 3844c5aff182SThomas Petazzoni { 3845c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3846e121d270SLorenzo Bianconi struct bpf_prog *prog = pp->xdp_prog; 3847c5aff182SThomas Petazzoni int ret; 3848c5aff182SThomas Petazzoni 38495777987eSJarod Wilson if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 38505777987eSJarod Wilson netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 38515777987eSJarod Wilson mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 38525777987eSJarod Wilson mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 38535777987eSJarod Wilson } 3854c5aff182SThomas Petazzoni 3855e121d270SLorenzo Bianconi if (prog && !prog->aux->xdp_has_frags && 3856e121d270SLorenzo Bianconi mtu > MVNETA_MAX_RX_BUF_SIZE) { 3857e121d270SLorenzo Bianconi netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n", 3858e121d270SLorenzo Bianconi mtu); 3859e121d270SLorenzo Bianconi 38600db51da7SLorenzo Bianconi return -EINVAL; 38610db51da7SLorenzo Bianconi } 38620db51da7SLorenzo Bianconi 3863c5aff182SThomas Petazzoni dev->mtu = mtu; 3864c5aff182SThomas Petazzoni 3865b65657fcSSimon Guinot if (!netif_running(dev)) { 3866dc35a10fSMarcin Wojtas if (pp->bm_priv) 3867dc35a10fSMarcin Wojtas mvneta_bm_update_mtu(pp, mtu); 3868dc35a10fSMarcin Wojtas 3869b65657fcSSimon Guinot netdev_update_features(dev); 3870c5aff182SThomas Petazzoni return 0; 3871b65657fcSSimon Guinot } 3872c5aff182SThomas Petazzoni 38736a20c175SThomas Petazzoni /* The interface is running, so we have to force a 3874a92dbd96SEzequiel Garcia * reallocation of the queues 3875c5aff182SThomas Petazzoni */ 3876c5aff182SThomas Petazzoni mvneta_stop_dev(pp); 3877db5dd0dbSMarcin Wojtas on_each_cpu(mvneta_percpu_disable, pp, true); 3878c5aff182SThomas Petazzoni 3879c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 3880c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 3881c5aff182SThomas Petazzoni 3882dc35a10fSMarcin Wojtas if (pp->bm_priv) 3883dc35a10fSMarcin Wojtas mvneta_bm_update_mtu(pp, mtu); 3884dc35a10fSMarcin Wojtas 3885a92dbd96SEzequiel Garcia pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 3886c5aff182SThomas Petazzoni 3887c5aff182SThomas Petazzoni ret = mvneta_setup_rxqs(pp); 3888c5aff182SThomas Petazzoni if (ret) { 3889a92dbd96SEzequiel Garcia netdev_err(dev, "unable to setup 
rxqs after MTU change\n"); 3890c5aff182SThomas Petazzoni return ret; 3891c5aff182SThomas Petazzoni } 3892c5aff182SThomas Petazzoni 3893a92dbd96SEzequiel Garcia ret = mvneta_setup_txqs(pp); 3894a92dbd96SEzequiel Garcia if (ret) { 3895a92dbd96SEzequiel Garcia netdev_err(dev, "unable to setup txqs after MTU change\n"); 3896a92dbd96SEzequiel Garcia return ret; 3897a92dbd96SEzequiel Garcia } 3898c5aff182SThomas Petazzoni 3899db5dd0dbSMarcin Wojtas on_each_cpu(mvneta_percpu_enable, pp, true); 3900c5aff182SThomas Petazzoni mvneta_start_dev(pp); 3901c5aff182SThomas Petazzoni 3902b65657fcSSimon Guinot netdev_update_features(dev); 3903b65657fcSSimon Guinot 3904c5aff182SThomas Petazzoni return 0; 3905c5aff182SThomas Petazzoni } 3906c5aff182SThomas Petazzoni 3907b65657fcSSimon Guinot static netdev_features_t mvneta_fix_features(struct net_device *dev, 3908b65657fcSSimon Guinot netdev_features_t features) 3909b65657fcSSimon Guinot { 3910b65657fcSSimon Guinot struct mvneta_port *pp = netdev_priv(dev); 3911b65657fcSSimon Guinot 3912b65657fcSSimon Guinot if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 3913b65657fcSSimon Guinot features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 3914b65657fcSSimon Guinot netdev_info(dev, 3915b65657fcSSimon Guinot "Disable IP checksum for MTU greater than %dB\n", 3916b65657fcSSimon Guinot pp->tx_csum_limit); 3917b65657fcSSimon Guinot } 3918b65657fcSSimon Guinot 3919b65657fcSSimon Guinot return features; 3920b65657fcSSimon Guinot } 3921b65657fcSSimon Guinot 39228cc3e439SThomas Petazzoni /* Get mac address */ 39238cc3e439SThomas Petazzoni static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 39248cc3e439SThomas Petazzoni { 39258cc3e439SThomas Petazzoni u32 mac_addr_l, mac_addr_h; 39268cc3e439SThomas Petazzoni 39278cc3e439SThomas Petazzoni mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 39288cc3e439SThomas Petazzoni mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 39298cc3e439SThomas Petazzoni addr[0] = (mac_addr_h >> 24) & 0xFF; 39308cc3e439SThomas Petazzoni addr[1] = (mac_addr_h >> 16) & 0xFF; 39318cc3e439SThomas Petazzoni addr[2] = (mac_addr_h >> 8) & 0xFF; 39328cc3e439SThomas Petazzoni addr[3] = mac_addr_h & 0xFF; 39338cc3e439SThomas Petazzoni addr[4] = (mac_addr_l >> 8) & 0xFF; 39348cc3e439SThomas Petazzoni addr[5] = mac_addr_l & 0xFF; 39358cc3e439SThomas Petazzoni } 39368cc3e439SThomas Petazzoni 3937c5aff182SThomas Petazzoni /* Handle setting mac address */ 3938c5aff182SThomas Petazzoni static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 3939c5aff182SThomas Petazzoni { 3940c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 3941e68de360SEzequiel Garcia struct sockaddr *sockaddr = addr; 3942e68de360SEzequiel Garcia int ret; 3943c5aff182SThomas Petazzoni 3944e68de360SEzequiel Garcia ret = eth_prepare_mac_addr_change(dev, addr); 3945e68de360SEzequiel Garcia if (ret < 0) 3946e68de360SEzequiel Garcia return ret; 3947c5aff182SThomas Petazzoni /* Remove previous address table entry */ 3948c5aff182SThomas Petazzoni mvneta_mac_addr_set(pp, dev->dev_addr, -1); 3949c5aff182SThomas Petazzoni 3950c5aff182SThomas Petazzoni /* Set new addr in hw */ 395190b74c01SGregory CLEMENT mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 3952c5aff182SThomas Petazzoni 3953e68de360SEzequiel Garcia eth_commit_mac_addr_change(dev, addr); 3954c5aff182SThomas Petazzoni return 0; 3955c5aff182SThomas Petazzoni } 3956c5aff182SThomas Petazzoni 3957c2e7d2dfSRussell King static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) 
3958503f9aa9SRussell King { 3959c2e7d2dfSRussell King return container_of(pcs, struct mvneta_port, phylink_pcs); 3960503f9aa9SRussell King } 3961503f9aa9SRussell King 3962d8c36693SRussell King (Oracle) static int mvneta_pcs_validate(struct phylink_pcs *pcs, 3963d8c36693SRussell King (Oracle) unsigned long *supported, 3964d8c36693SRussell King (Oracle) const struct phylink_link_state *state) 3965d8c36693SRussell King (Oracle) { 3966d8c36693SRussell King (Oracle) /* We only support QSGMII, SGMII, 802.3z and RGMII modes. 3967d8c36693SRussell King (Oracle) * When in 802.3z mode, we must have AN enabled: 3968d8c36693SRussell King (Oracle) * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 3969d8c36693SRussell King (Oracle) * When <PortType> = 1 (1000BASE-X) this field must be set to 1." 3970d8c36693SRussell King (Oracle) */ 3971d8c36693SRussell King (Oracle) if (phy_interface_mode_is_8023z(state->interface) && 3972d8c36693SRussell King (Oracle) !phylink_test(state->advertising, Autoneg)) 3973d8c36693SRussell King (Oracle) return -EINVAL; 3974d8c36693SRussell King (Oracle) 3975d8c36693SRussell King (Oracle) return 0; 3976d8c36693SRussell King (Oracle) } 3977d8c36693SRussell King (Oracle) 3978c2e7d2dfSRussell King static void mvneta_pcs_get_state(struct phylink_pcs *pcs, 3979503f9aa9SRussell King struct phylink_link_state *state) 3980c5aff182SThomas Petazzoni { 3981c2e7d2dfSRussell King struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 3982503f9aa9SRussell King u32 gmac_stat; 3983c5aff182SThomas Petazzoni 3984503f9aa9SRussell King gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3985503f9aa9SRussell King 3986503f9aa9SRussell King if (gmac_stat & MVNETA_GMAC_SPEED_1000) 3987a10c1c81SRussell King state->speed = 3988a10c1c81SRussell King state->interface == PHY_INTERFACE_MODE_2500BASEX ? 
3989a10c1c81SRussell King SPEED_2500 : SPEED_1000; 3990503f9aa9SRussell King else if (gmac_stat & MVNETA_GMAC_SPEED_100) 3991503f9aa9SRussell King state->speed = SPEED_100; 3992503f9aa9SRussell King else 3993503f9aa9SRussell King state->speed = SPEED_10; 3994503f9aa9SRussell King 3995503f9aa9SRussell King state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); 3996503f9aa9SRussell King state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 3997503f9aa9SRussell King state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 3998503f9aa9SRussell King 39994932a918SRussell King if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) 40004932a918SRussell King state->pause |= MLO_PAUSE_RX; 40014932a918SRussell King if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) 40024932a918SRussell King state->pause |= MLO_PAUSE_TX; 4003503f9aa9SRussell King } 4004503f9aa9SRussell King 4005140d1002SRussell King (Oracle) static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 4006140d1002SRussell King (Oracle) phy_interface_t interface, 4007c2e7d2dfSRussell King const unsigned long *advertising, 4008c2e7d2dfSRussell King bool permit_pause_to_mac) 400922f4bf8aSRussell King { 4010c2e7d2dfSRussell King struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 4011c2e7d2dfSRussell King u32 mask, val, an, old_an, changed; 4012c2e7d2dfSRussell King 4013c2e7d2dfSRussell King mask = MVNETA_GMAC_INBAND_AN_ENABLE | 4014c2e7d2dfSRussell King MVNETA_GMAC_INBAND_RESTART_AN | 4015c2e7d2dfSRussell King MVNETA_GMAC_AN_SPEED_EN | 4016c2e7d2dfSRussell King MVNETA_GMAC_AN_FLOW_CTRL_EN | 4017c2e7d2dfSRussell King MVNETA_GMAC_AN_DUPLEX_EN; 4018c2e7d2dfSRussell King 4019140d1002SRussell King (Oracle) if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { 4020c2e7d2dfSRussell King mask |= MVNETA_GMAC_CONFIG_MII_SPEED | 4021c2e7d2dfSRussell King MVNETA_GMAC_CONFIG_GMII_SPEED | 4022c2e7d2dfSRussell King MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4023c2e7d2dfSRussell King val = MVNETA_GMAC_INBAND_AN_ENABLE; 4024c2e7d2dfSRussell King 4025c2e7d2dfSRussell King if (interface == PHY_INTERFACE_MODE_SGMII) { 4026c2e7d2dfSRussell King /* SGMII mode receives the speed and duplex from PHY */ 4027c2e7d2dfSRussell King val |= MVNETA_GMAC_AN_SPEED_EN | 4028c2e7d2dfSRussell King MVNETA_GMAC_AN_DUPLEX_EN; 4029c2e7d2dfSRussell King } else { 4030c2e7d2dfSRussell King /* 802.3z mode has fixed speed and duplex */ 4031c2e7d2dfSRussell King val |= MVNETA_GMAC_CONFIG_GMII_SPEED | 4032c2e7d2dfSRussell King MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4033c2e7d2dfSRussell King 4034c2e7d2dfSRussell King /* The FLOW_CTRL_EN bit selects either the hardware 4035c2e7d2dfSRussell King * automatically or the CONFIG_FLOW_CTRL manually 4036c2e7d2dfSRussell King * controls the GMAC pause mode. 
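 * (In other words: with MVNETA_GMAC_AN_FLOW_CTRL_EN set, the GMAC resolves pause from the in-band negotiation on its own; with it clear, the MVNETA_GMAC_CONFIG_FLOW_CTRL bit written by mvneta_mac_link_up() controls pause directly.)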
4037c2e7d2dfSRussell King */ 4038c2e7d2dfSRussell King if (permit_pause_to_mac) 4039c2e7d2dfSRussell King val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; 4040c2e7d2dfSRussell King 4041c2e7d2dfSRussell King /* Update the advertisement bits */ 4042c2e7d2dfSRussell King mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 4043c2e7d2dfSRussell King if (phylink_test(advertising, Pause)) 4044c2e7d2dfSRussell King val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 4045c2e7d2dfSRussell King } 4046c2e7d2dfSRussell King } else { 4047c2e7d2dfSRussell King /* Phy or fixed speed - disable in-band AN modes */ 4048c2e7d2dfSRussell King val = 0; 4049c2e7d2dfSRussell King } 4050c2e7d2dfSRussell King 4051c2e7d2dfSRussell King old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4052c2e7d2dfSRussell King an = (an & ~mask) | val; 4053c2e7d2dfSRussell King changed = old_an ^ an; 4054c2e7d2dfSRussell King if (changed) 4055c2e7d2dfSRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); 4056c2e7d2dfSRussell King 4057c2e7d2dfSRussell King /* We are only interested in the advertisement bits changing */ 4058c2e7d2dfSRussell King return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); 4059c2e7d2dfSRussell King } 4060c2e7d2dfSRussell King 4061c2e7d2dfSRussell King static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) 4062c2e7d2dfSRussell King { 4063c2e7d2dfSRussell King struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 406422f4bf8aSRussell King u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 406522f4bf8aSRussell King 406622f4bf8aSRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 406722f4bf8aSRussell King gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); 406822f4bf8aSRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 406922f4bf8aSRussell King gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); 407022f4bf8aSRussell King } 407122f4bf8aSRussell King 4072c2e7d2dfSRussell King static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { 4073d8c36693SRussell King (Oracle) .pcs_validate = mvneta_pcs_validate, 4074c2e7d2dfSRussell King .pcs_get_state = mvneta_pcs_get_state, 4075c2e7d2dfSRussell King .pcs_config = mvneta_pcs_config, 4076c2e7d2dfSRussell King .pcs_an_restart = mvneta_pcs_an_restart, 4077c2e7d2dfSRussell King }; 4078c2e7d2dfSRussell King 40790ac4a71fSRussell King (Oracle) static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config, 40800ac4a71fSRussell King (Oracle) phy_interface_t interface) 40810ac4a71fSRussell King (Oracle) { 40820ac4a71fSRussell King (Oracle) struct net_device *ndev = to_net_dev(config->dev); 40830ac4a71fSRussell King (Oracle) struct mvneta_port *pp = netdev_priv(ndev); 40840ac4a71fSRussell King (Oracle) 40850ac4a71fSRussell King (Oracle) return &pp->phylink_pcs; 40860ac4a71fSRussell King (Oracle) } 40870ac4a71fSRussell King (Oracle) 40885a7d8953SRussell King static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, 40895a7d8953SRussell King phy_interface_t interface) 40905a7d8953SRussell King { 40915a7d8953SRussell King struct net_device *ndev = to_net_dev(config->dev); 40925a7d8953SRussell King struct mvneta_port *pp = netdev_priv(ndev); 40935a7d8953SRussell King u32 val; 40945a7d8953SRussell King 40955a7d8953SRussell King if (pp->phy_interface != interface || 40965a7d8953SRussell King phylink_autoneg_inband(mode)) { 40975a7d8953SRussell King /* Force the link down when changing the interface or if in 40985a7d8953SRussell King * in-band mode. 
According to Armada 370 documentation, we 40995a7d8953SRussell King * can only change the port mode and in-band enable when the 41005a7d8953SRussell King * link is down. 41015a7d8953SRussell King */ 41025a7d8953SRussell King val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 41035a7d8953SRussell King val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 41045a7d8953SRussell King val |= MVNETA_GMAC_FORCE_LINK_DOWN; 41055a7d8953SRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 41065a7d8953SRussell King } 41075a7d8953SRussell King 41085a7d8953SRussell King if (pp->phy_interface != interface) 41095a7d8953SRussell King WARN_ON(phy_power_off(pp->comphy)); 41105a7d8953SRussell King 41115a7d8953SRussell King /* Enable the 1ms clock */ 41125a7d8953SRussell King if (phylink_autoneg_inband(mode)) { 41135a7d8953SRussell King unsigned long rate = clk_get_rate(pp->clk); 41145a7d8953SRussell King 41155a7d8953SRussell King mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, 41165a7d8953SRussell King MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); 41175a7d8953SRussell King } 41185a7d8953SRussell King 41195a7d8953SRussell King return 0; 41205a7d8953SRussell King } 41215a7d8953SRussell King 412244cc27e4SIoana Ciornei static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, 4123503f9aa9SRussell King const struct phylink_link_state *state) 4124503f9aa9SRussell King { 412544cc27e4SIoana Ciornei struct net_device *ndev = to_net_dev(config->dev); 4126503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(ndev); 412722f4bf8aSRussell King u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 4128503f9aa9SRussell King u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 4129da58a931SMaxime Chevallier u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); 4130503f9aa9SRussell King 413122f4bf8aSRussell King new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; 413232699954SRussell King new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | 413332699954SRussell King MVNETA_GMAC2_PORT_RESET); 4134da58a931SMaxime Chevallier new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); 4135c5aff182SThomas Petazzoni 413632699954SRussell King /* Even though it might look weird, when we're configured in 413732699954SRussell King * SGMII or QSGMII mode, the RGMII bit needs to be set. 413832699954SRussell King */ 413932699954SRussell King new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; 414032699954SRussell King 414132699954SRussell King if (state->interface == PHY_INTERFACE_MODE_QSGMII || 414222f4bf8aSRussell King state->interface == PHY_INTERFACE_MODE_SGMII || 414322f4bf8aSRussell King phy_interface_mode_is_8023z(state->interface)) 414432699954SRussell King new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; 414532699954SRussell King 4146503f9aa9SRussell King if (!phylink_autoneg_inband(mode)) { 4147ff03f0b1SRussell King /* Phy or fixed speed - nothing to do, leave the 4148ff03f0b1SRussell King * configured speed, duplex and flow control as-is. 
4149ff03f0b1SRussell King */ 415022f4bf8aSRussell King } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 4151503f9aa9SRussell King /* SGMII mode receives the state from the PHY */ 4152503f9aa9SRussell King new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; 415322f4bf8aSRussell King } else { 415422f4bf8aSRussell King /* 802.3z negotiation - only 1000base-X */ 415522f4bf8aSRussell King new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; 4156c5aff182SThomas Petazzoni } 4157c5aff182SThomas Petazzoni 4158da58a931SMaxime Chevallier /* When at 2.5G, the link partner can send frames with shortened 4159da58a931SMaxime Chevallier * preambles. 4160da58a931SMaxime Chevallier */ 4161f2ca673dSRussell King if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 4162da58a931SMaxime Chevallier new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 4163da58a931SMaxime Chevallier 416422f4bf8aSRussell King if (new_ctrl0 != gmac_ctrl0) 416522f4bf8aSRussell King mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); 4166503f9aa9SRussell King if (new_ctrl2 != gmac_ctrl2) 4167503f9aa9SRussell King mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); 4168da58a931SMaxime Chevallier if (new_ctrl4 != gmac_ctrl4) 4169da58a931SMaxime Chevallier mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); 417032699954SRussell King 417132699954SRussell King if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { 417232699954SRussell King while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 417332699954SRussell King MVNETA_GMAC2_PORT_RESET) != 0) 417432699954SRussell King continue; 417532699954SRussell King } 4176503f9aa9SRussell King } 4177503f9aa9SRussell King 41785a7d8953SRussell King static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, 41795a7d8953SRussell King phy_interface_t interface) 41805a7d8953SRussell King { 41815a7d8953SRussell King struct net_device *ndev = to_net_dev(config->dev); 41825a7d8953SRussell King struct mvneta_port *pp = netdev_priv(ndev); 41835a7d8953SRussell King u32 val, clk; 41845a7d8953SRussell King 41855a7d8953SRussell King /* Disable 1ms clock if not in in-band mode */ 41865a7d8953SRussell King if (!phylink_autoneg_inband(mode)) { 41875a7d8953SRussell King clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 41885a7d8953SRussell King clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; 41895a7d8953SRussell King mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); 41905a7d8953SRussell King } 41915a7d8953SRussell King 41925a7d8953SRussell King if (pp->phy_interface != interface) 41935a7d8953SRussell King /* Enable the Serdes PHY */ 41945a7d8953SRussell King WARN_ON(mvneta_config_interface(pp, interface)); 41955a7d8953SRussell King 41965a7d8953SRussell King /* Allow the link to come up if in in-band mode, otherwise the 41975a7d8953SRussell King * link is forced via mac_link_down()/mac_link_up() 41985a7d8953SRussell King */ 41995a7d8953SRussell King if (phylink_autoneg_inband(mode)) { 42005a7d8953SRussell King val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 42015a7d8953SRussell King val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; 42025a7d8953SRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 42035a7d8953SRussell King } 42045a7d8953SRussell King 42055a7d8953SRussell King return 0; 42065a7d8953SRussell King } 42075a7d8953SRussell King 42086d81f451SRussell King static void mvneta_set_eee(struct mvneta_port *pp, bool enable) 42096d81f451SRussell King { 42106d81f451SRussell King u32 lpi_ctl1; 42116d81f451SRussell King 42126d81f451SRussell King lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 42136d81f451SRussell King if (enable) 42146d81f451SRussell King 
lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; 42156d81f451SRussell King else 42166d81f451SRussell King lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; 42176d81f451SRussell King mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); 42186d81f451SRussell King } 42196d81f451SRussell King 422044cc27e4SIoana Ciornei static void mvneta_mac_link_down(struct phylink_config *config, 422144cc27e4SIoana Ciornei unsigned int mode, phy_interface_t interface) 4222fc548b99SRussell King { 422344cc27e4SIoana Ciornei struct net_device *ndev = to_net_dev(config->dev); 4224fc548b99SRussell King struct mvneta_port *pp = netdev_priv(ndev); 4225fc548b99SRussell King u32 val; 4226fc548b99SRussell King 4227503f9aa9SRussell King mvneta_port_down(pp); 4228503f9aa9SRussell King 4229503f9aa9SRussell King if (!phylink_autoneg_inband(mode)) { 4230fc548b99SRussell King val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4231fc548b99SRussell King val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4232fc548b99SRussell King val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4233fc548b99SRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4234fc548b99SRussell King } 42356d81f451SRussell King 42366d81f451SRussell King pp->eee_active = false; 42376d81f451SRussell King mvneta_set_eee(pp, false); 4238fc548b99SRussell King } 4239fc548b99SRussell King 424091a208f2SRussell King static void mvneta_mac_link_up(struct phylink_config *config, 424191a208f2SRussell King struct phy_device *phy, 424291a208f2SRussell King unsigned int mode, phy_interface_t interface, 424391a208f2SRussell King int speed, int duplex, 424491a208f2SRussell King bool tx_pause, bool rx_pause) 4245fc548b99SRussell King { 424644cc27e4SIoana Ciornei struct net_device *ndev = to_net_dev(config->dev); 4247fc548b99SRussell King struct mvneta_port *pp = netdev_priv(ndev); 4248fc548b99SRussell King u32 val; 4249fc548b99SRussell King 4250503f9aa9SRussell King if (!phylink_autoneg_inband(mode)) { 4251fc548b99SRussell King val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4252ff03f0b1SRussell King val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | 4253ff03f0b1SRussell King MVNETA_GMAC_CONFIG_MII_SPEED | 4254ff03f0b1SRussell King MVNETA_GMAC_CONFIG_GMII_SPEED | 4255ff03f0b1SRussell King MVNETA_GMAC_CONFIG_FLOW_CTRL | 4256ff03f0b1SRussell King MVNETA_GMAC_CONFIG_FULL_DUPLEX); 4257fc548b99SRussell King val |= MVNETA_GMAC_FORCE_LINK_PASS; 4258ff03f0b1SRussell King 4259ff03f0b1SRussell King if (speed == SPEED_1000 || speed == SPEED_2500) 4260ff03f0b1SRussell King val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 4261ff03f0b1SRussell King else if (speed == SPEED_100) 4262ff03f0b1SRussell King val |= MVNETA_GMAC_CONFIG_MII_SPEED; 4263ff03f0b1SRussell King 4264ff03f0b1SRussell King if (duplex == DUPLEX_FULL) 4265ff03f0b1SRussell King val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4266ff03f0b1SRussell King 4267ff03f0b1SRussell King if (tx_pause || rx_pause) 4268ff03f0b1SRussell King val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4269ff03f0b1SRussell King 4270ff03f0b1SRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4271ff03f0b1SRussell King } else { 4272ff03f0b1SRussell King /* When inband doesn't cover flow control or flow control is 4273ff03f0b1SRussell King * disabled, we need to manually configure it. This bit will 4274ff03f0b1SRussell King * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 
4275ff03f0b1SRussell King */ 4276ff03f0b1SRussell King val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4277ff03f0b1SRussell King val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; 4278ff03f0b1SRussell King 4279ff03f0b1SRussell King if (tx_pause || rx_pause) 4280ff03f0b1SRussell King val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4281ff03f0b1SRussell King 4282fc548b99SRussell King mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4283fc548b99SRussell King } 4284fc548b99SRussell King 4285fc548b99SRussell King mvneta_port_up(pp); 42866d81f451SRussell King 42876d81f451SRussell King if (phy && pp->eee_enabled) { 428853243d41SJisheng Zhang pp->eee_active = phy_init_eee(phy, false) >= 0; 42896d81f451SRussell King mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); 42906d81f451SRussell King } 4291fc548b99SRussell King } 4292fc548b99SRussell King 4293503f9aa9SRussell King static const struct phylink_mac_ops mvneta_phylink_ops = { 42940ac4a71fSRussell King (Oracle) .mac_select_pcs = mvneta_mac_select_pcs, 42955a7d8953SRussell King .mac_prepare = mvneta_mac_prepare, 4296503f9aa9SRussell King .mac_config = mvneta_mac_config, 42975a7d8953SRussell King .mac_finish = mvneta_mac_finish, 4298503f9aa9SRussell King .mac_link_down = mvneta_mac_link_down, 4299503f9aa9SRussell King .mac_link_up = mvneta_mac_link_up, 4300503f9aa9SRussell King }; 4301c5aff182SThomas Petazzoni 4302c5aff182SThomas Petazzoni static int mvneta_mdio_probe(struct mvneta_port *pp) 4303c5aff182SThomas Petazzoni { 430482960fffSJisheng Zhang struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 4305503f9aa9SRussell King int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); 4306c5aff182SThomas Petazzoni 4307503f9aa9SRussell King if (err) 4308503f9aa9SRussell King netdev_err(pp->dev, "could not attach PHY: %d\n", err); 4309c5aff182SThomas Petazzoni 4310503f9aa9SRussell King phylink_ethtool_get_wol(pp->phylink, &wol); 431182960fffSJisheng Zhang device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); 431282960fffSJisheng Zhang 431361b5cc20SDaniel González Cabanelas /* PHY WoL may be enabled but device wakeup disabled */ 431461b5cc20SDaniel González Cabanelas if (wol.supported) 431561b5cc20SDaniel González Cabanelas device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); 431661b5cc20SDaniel González Cabanelas 4317503f9aa9SRussell King return err; 4318c5aff182SThomas Petazzoni } 4319c5aff182SThomas Petazzoni 4320c5aff182SThomas Petazzoni static void mvneta_mdio_remove(struct mvneta_port *pp) 4321c5aff182SThomas Petazzoni { 4322503f9aa9SRussell King phylink_disconnect_phy(pp->phylink); 4323c5aff182SThomas Petazzoni } 4324c5aff182SThomas Petazzoni 4325120cfa50SGregory CLEMENT /* Electing a CPU must be done in an atomic way: it should be done 4326120cfa50SGregory CLEMENT * after or before the removal/insertion of a CPU and this function is 4327120cfa50SGregory CLEMENT * not reentrant. 4328120cfa50SGregory CLEMENT */ 4329f8642885SMaxime Ripard static void mvneta_percpu_elect(struct mvneta_port *pp) 4330f8642885SMaxime Ripard { 43310cf9deb3SColin Ian King int elected_cpu = 0, max_cpu, cpu; 4332f8642885SMaxime Ripard 4333cad5d847SGregory CLEMENT /* Use the cpu associated to the rxq when it is online, in all 4334cad5d847SGregory CLEMENT * the other cases, use the cpu 0 which can't be offline. 
4335cad5d847SGregory CLEMENT */ 4336cdd97383SDan Carpenter if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) 4337cad5d847SGregory CLEMENT elected_cpu = pp->rxq_def; 4338cad5d847SGregory CLEMENT 43392dcf75e2SGregory CLEMENT max_cpu = num_present_cpus(); 4340f8642885SMaxime Ripard 4341f8642885SMaxime Ripard for_each_online_cpu(cpu) { 43422dcf75e2SGregory CLEMENT int rxq_map = 0, txq_map = 0; 43432dcf75e2SGregory CLEMENT int rxq; 43442dcf75e2SGregory CLEMENT 43452dcf75e2SGregory CLEMENT for (rxq = 0; rxq < rxq_number; rxq++) 43462dcf75e2SGregory CLEMENT if ((rxq % max_cpu) == cpu) 43472dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 43482dcf75e2SGregory CLEMENT 4349cad5d847SGregory CLEMENT if (cpu == elected_cpu) 4350b52f6425SYangyang Li /* Map the default receive queue to the elected CPU */ 43512dcf75e2SGregory CLEMENT rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 435250bf8cb6SGregory CLEMENT 435350bf8cb6SGregory CLEMENT /* We update the TX queue map only if we have one 435450bf8cb6SGregory CLEMENT * queue. In this case we associate the TX queue to 435550bf8cb6SGregory CLEMENT * the CPU bound to the default RX queue 435650bf8cb6SGregory CLEMENT */ 435750bf8cb6SGregory CLEMENT if (txq_number == 1) 4358cad5d847SGregory CLEMENT txq_map = (cpu == elected_cpu) ? 4359*21327f81SKlaus Kudielka MVNETA_CPU_TXQ_ACCESS(0) : 0; 436050bf8cb6SGregory CLEMENT else 436150bf8cb6SGregory CLEMENT txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 436250bf8cb6SGregory CLEMENT MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 436350bf8cb6SGregory CLEMENT 43642dcf75e2SGregory CLEMENT mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 43652dcf75e2SGregory CLEMENT 43662dcf75e2SGregory CLEMENT /* Update the interrupt mask on each CPU according the 43672dcf75e2SGregory CLEMENT * new mapping 43682dcf75e2SGregory CLEMENT */ 43692dcf75e2SGregory CLEMENT smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 4370f8642885SMaxime Ripard pp, true); 4371f8642885SMaxime Ripard } 4372f8642885SMaxime Ripard }; 4373f8642885SMaxime Ripard 437484a3f4dbSSebastian Andrzej Siewior static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) 4375f8642885SMaxime Ripard { 437684a3f4dbSSebastian Andrzej Siewior int other_cpu; 437784a3f4dbSSebastian Andrzej Siewior struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 437884a3f4dbSSebastian Andrzej Siewior node_online); 4379f8642885SMaxime Ripard struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4380f8642885SMaxime Ripard 4381cf9bf871SMaxime Chevallier /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts 4382cf9bf871SMaxime Chevallier * are routed to CPU 0, so we don't need all the cpu-hotplug support 4383cf9bf871SMaxime Chevallier */ 4384cf9bf871SMaxime Chevallier if (pp->neta_armada3700) 4385cf9bf871SMaxime Chevallier return 0; 438684a3f4dbSSebastian Andrzej Siewior 4387120cfa50SGregory CLEMENT spin_lock(&pp->lock); 438884a3f4dbSSebastian Andrzej Siewior /* 438984a3f4dbSSebastian Andrzej Siewior * Configuring the driver for a new CPU while the driver is 439084a3f4dbSSebastian Andrzej Siewior * stopping is racy, so just avoid it. 
4391120cfa50SGregory CLEMENT */
4392120cfa50SGregory CLEMENT if (pp->is_stopped) {
4393120cfa50SGregory CLEMENT spin_unlock(&pp->lock);
439484a3f4dbSSebastian Andrzej Siewior return 0;
4395120cfa50SGregory CLEMENT }
4396f8642885SMaxime Ripard netif_tx_stop_all_queues(pp->dev);
4397f8642885SMaxime Ripard
439884a3f4dbSSebastian Andrzej Siewior /*
439984a3f4dbSSebastian Andrzej Siewior * We have to synchronise on the napi of each CPU except the one
440084a3f4dbSSebastian Andrzej Siewior * just being woken up
4401f8642885SMaxime Ripard */
4402f8642885SMaxime Ripard for_each_online_cpu(other_cpu) {
4403f8642885SMaxime Ripard if (other_cpu != cpu) {
4404f8642885SMaxime Ripard struct mvneta_pcpu_port *other_port =
4405f8642885SMaxime Ripard per_cpu_ptr(pp->ports, other_cpu);
4406f8642885SMaxime Ripard
4407f8642885SMaxime Ripard napi_synchronize(&other_port->napi);
4408f8642885SMaxime Ripard }
4409f8642885SMaxime Ripard }
4410f8642885SMaxime Ripard
4411f8642885SMaxime Ripard /* Mask all ethernet port interrupts */
4412db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4413f8642885SMaxime Ripard napi_enable(&port->napi);
4414f8642885SMaxime Ripard
441584a3f4dbSSebastian Andrzej Siewior /*
441684a3f4dbSSebastian Andrzej Siewior * Enable per-CPU interrupts on the CPU that is
44172dcf75e2SGregory CLEMENT * brought up.
44182dcf75e2SGregory CLEMENT */
44190e28bf93SAnna-Maria Gleixner mvneta_percpu_enable(pp);
44202dcf75e2SGregory CLEMENT
442184a3f4dbSSebastian Andrzej Siewior /*
442284a3f4dbSSebastian Andrzej Siewior * Enable per-CPU interrupt on the one CPU we care
4423f8642885SMaxime Ripard * about.
4424f8642885SMaxime Ripard */
4425f8642885SMaxime Ripard mvneta_percpu_elect(pp);
4426f8642885SMaxime Ripard
4427db488c10SGregory CLEMENT /* Unmask all ethernet port interrupts */
4428db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4429f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4430f8642885SMaxime Ripard MVNETA_CAUSE_PHY_STATUS_CHANGE |
4431856b2cc5SRussell King MVNETA_CAUSE_LINK_CHANGE);
4432f8642885SMaxime Ripard netif_tx_start_all_queues(pp->dev);
4433120cfa50SGregory CLEMENT spin_unlock(&pp->lock);
443484a3f4dbSSebastian Andrzej Siewior return 0;
443584a3f4dbSSebastian Andrzej Siewior }
443684a3f4dbSSebastian Andrzej Siewior
443784a3f4dbSSebastian Andrzej Siewior static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
443884a3f4dbSSebastian Andrzej Siewior {
443984a3f4dbSSebastian Andrzej Siewior struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
444084a3f4dbSSebastian Andrzej Siewior node_online);
444184a3f4dbSSebastian Andrzej Siewior struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
444284a3f4dbSSebastian Andrzej Siewior
444384a3f4dbSSebastian Andrzej Siewior /*
444484a3f4dbSSebastian Andrzej Siewior * Thanks to this lock we are sure that any pending cpu election is
444584a3f4dbSSebastian Andrzej Siewior * done.
44465888511eSGregory CLEMENT */
44475888511eSGregory CLEMENT spin_lock(&pp->lock);
4448f8642885SMaxime Ripard /* Mask all ethernet port interrupts */
4449db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
44505888511eSGregory CLEMENT spin_unlock(&pp->lock);
4451f8642885SMaxime Ripard
4452f8642885SMaxime Ripard napi_synchronize(&port->napi);
4453f8642885SMaxime Ripard napi_disable(&port->napi);
445484a3f4dbSSebastian Andrzej Siewior /* Disable per-CPU interrupts on the CPU that is brought down.
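 * Once the CPU is completely offline, mvneta_cpu_dead() below re-elects
 * the CPU owning the default RX queue and unmasks the port interrupts
 * again.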
 */
44550e28bf93SAnna-Maria Gleixner mvneta_percpu_disable(pp);
445684a3f4dbSSebastian Andrzej Siewior return 0;
445784a3f4dbSSebastian Andrzej Siewior }
4458f8642885SMaxime Ripard
445984a3f4dbSSebastian Andrzej Siewior static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
446084a3f4dbSSebastian Andrzej Siewior {
446184a3f4dbSSebastian Andrzej Siewior struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
446284a3f4dbSSebastian Andrzej Siewior node_dead);
446384a3f4dbSSebastian Andrzej Siewior
4464f8642885SMaxime Ripard /* Check if a new CPU must be elected now this one is down */
4465120cfa50SGregory CLEMENT spin_lock(&pp->lock);
4466f8642885SMaxime Ripard mvneta_percpu_elect(pp);
4467120cfa50SGregory CLEMENT spin_unlock(&pp->lock);
4468f8642885SMaxime Ripard /* Unmask all ethernet port interrupts */
4469db488c10SGregory CLEMENT on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4470f8642885SMaxime Ripard mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4471f8642885SMaxime Ripard MVNETA_CAUSE_PHY_STATUS_CHANGE |
4472856b2cc5SRussell King MVNETA_CAUSE_LINK_CHANGE);
4473f8642885SMaxime Ripard netif_tx_start_all_queues(pp->dev);
447484a3f4dbSSebastian Andrzej Siewior return 0;
4475f8642885SMaxime Ripard }
4476f8642885SMaxime Ripard
4477c5aff182SThomas Petazzoni static int mvneta_open(struct net_device *dev)
4478c5aff182SThomas Petazzoni {
4479c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev);
44806b125d63SGregory CLEMENT int ret;
4481c5aff182SThomas Petazzoni
4482c5aff182SThomas Petazzoni pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4483c5aff182SThomas Petazzoni
4484c5aff182SThomas Petazzoni ret = mvneta_setup_rxqs(pp);
4485c5aff182SThomas Petazzoni if (ret)
4486c5aff182SThomas Petazzoni return ret;
4487c5aff182SThomas Petazzoni
4488c5aff182SThomas Petazzoni ret = mvneta_setup_txqs(pp);
4489c5aff182SThomas Petazzoni if (ret)
4490c5aff182SThomas Petazzoni goto err_cleanup_rxqs;
4491c5aff182SThomas Petazzoni
4492c5aff182SThomas Petazzoni /* Connect to port interrupt line */
44932636ac3cSMarcin Wojtas if (pp->neta_armada3700)
44942636ac3cSMarcin Wojtas ret = request_irq(pp->dev->irq, mvneta_isr, 0,
44952636ac3cSMarcin Wojtas dev->name, pp);
44962636ac3cSMarcin Wojtas else
44972636ac3cSMarcin Wojtas ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
44982636ac3cSMarcin Wojtas dev->name, pp->ports);
4499c5aff182SThomas Petazzoni if (ret) {
4500c5aff182SThomas Petazzoni netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4501c5aff182SThomas Petazzoni goto err_cleanup_txqs;
4502c5aff182SThomas Petazzoni }
4503c5aff182SThomas Petazzoni
45042636ac3cSMarcin Wojtas if (!pp->neta_armada3700) {
45052dcf75e2SGregory CLEMENT /* Enable per-CPU interrupt on all the CPUs to handle our RX
45062dcf75e2SGregory CLEMENT * queue interrupts
45072dcf75e2SGregory CLEMENT */
45086b125d63SGregory CLEMENT on_each_cpu(mvneta_percpu_enable, pp, true);
45092dcf75e2SGregory CLEMENT
4510120cfa50SGregory CLEMENT pp->is_stopped = false;
4511f8642885SMaxime Ripard /* Register a CPU notifier to handle the case where our CPU
4512f8642885SMaxime Ripard * might be taken offline.
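 * Two hotplug state instances are added below: one for CPUs coming
 * online and one (CPUHP_NET_MVNETA_DEAD) for CPUs that have gone down.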
4513f8642885SMaxime Ripard */
451484a3f4dbSSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(online_hpstate,
451584a3f4dbSSebastian Andrzej Siewior &pp->node_online);
451684a3f4dbSSebastian Andrzej Siewior if (ret)
451784a3f4dbSSebastian Andrzej Siewior goto err_free_irq;
451884a3f4dbSSebastian Andrzej Siewior
451984a3f4dbSSebastian Andrzej Siewior ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
452084a3f4dbSSebastian Andrzej Siewior &pp->node_dead);
452184a3f4dbSSebastian Andrzej Siewior if (ret)
452284a3f4dbSSebastian Andrzej Siewior goto err_free_online_hp;
45232636ac3cSMarcin Wojtas }
4524f8642885SMaxime Ripard
4525c5aff182SThomas Petazzoni ret = mvneta_mdio_probe(pp);
4526c5aff182SThomas Petazzoni if (ret < 0) {
4527c5aff182SThomas Petazzoni netdev_err(dev, "cannot probe MDIO bus\n");
452884a3f4dbSSebastian Andrzej Siewior goto err_free_dead_hp;
4529c5aff182SThomas Petazzoni }
4530c5aff182SThomas Petazzoni
4531c5aff182SThomas Petazzoni mvneta_start_dev(pp);
4532c5aff182SThomas Petazzoni
4533c5aff182SThomas Petazzoni return 0;
4534c5aff182SThomas Petazzoni
453584a3f4dbSSebastian Andrzej Siewior err_free_dead_hp:
45362636ac3cSMarcin Wojtas if (!pp->neta_armada3700)
453784a3f4dbSSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
453884a3f4dbSSebastian Andrzej Siewior &pp->node_dead);
453984a3f4dbSSebastian Andrzej Siewior err_free_online_hp:
45402636ac3cSMarcin Wojtas if (!pp->neta_armada3700)
45412636ac3cSMarcin Wojtas cpuhp_state_remove_instance_nocalls(online_hpstate,
45422636ac3cSMarcin Wojtas &pp->node_online);
4543c5aff182SThomas Petazzoni err_free_irq:
45442636ac3cSMarcin Wojtas if (pp->neta_armada3700) {
45452636ac3cSMarcin Wojtas free_irq(pp->dev->irq, pp);
45462636ac3cSMarcin Wojtas } else {
45473d8c4530SRussell King - ARM Linux on_each_cpu(mvneta_percpu_disable, pp, true);
454812bb03b4SMaxime Ripard free_percpu_irq(pp->dev->irq, pp->ports);
45492636ac3cSMarcin Wojtas }
4550c5aff182SThomas Petazzoni err_cleanup_txqs:
4551c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp);
4552c5aff182SThomas Petazzoni err_cleanup_rxqs:
4553c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp);
4554c5aff182SThomas Petazzoni return ret;
4555c5aff182SThomas Petazzoni }
4556c5aff182SThomas Petazzoni
4557c5aff182SThomas Petazzoni /* Stop the port, free port interrupt line */
4558c5aff182SThomas Petazzoni static int mvneta_stop(struct net_device *dev)
4559c5aff182SThomas Petazzoni {
4560c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev);
4561c5aff182SThomas Petazzoni
45622636ac3cSMarcin Wojtas if (!pp->neta_armada3700) {
4563120cfa50SGregory CLEMENT /* Inform that we are stopping so we don't want to set up the
45641c2722a9SGregory CLEMENT * driver for new CPUs in the notifiers. The code of the
45651c2722a9SGregory CLEMENT * notifier for CPU online is protected by the same spinlock,
45661c2722a9SGregory CLEMENT * so when we get the lock, the notifier work is done.
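 * (mvneta_cpu_online() takes the same pp->lock and returns early when
 * it observes is_stopped set.)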
4567120cfa50SGregory CLEMENT */ 4568120cfa50SGregory CLEMENT spin_lock(&pp->lock); 4569120cfa50SGregory CLEMENT pp->is_stopped = true; 45701c2722a9SGregory CLEMENT spin_unlock(&pp->lock); 45711c2722a9SGregory CLEMENT 4572c5aff182SThomas Petazzoni mvneta_stop_dev(pp); 4573c5aff182SThomas Petazzoni mvneta_mdio_remove(pp); 457484a3f4dbSSebastian Andrzej Siewior 4575d26aac2dSDan Carpenter cpuhp_state_remove_instance_nocalls(online_hpstate, 4576d26aac2dSDan Carpenter &pp->node_online); 457784a3f4dbSSebastian Andrzej Siewior cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 457884a3f4dbSSebastian Andrzej Siewior &pp->node_dead); 4579129219e4SGregory CLEMENT on_each_cpu(mvneta_percpu_disable, pp, true); 458012bb03b4SMaxime Ripard free_percpu_irq(dev->irq, pp->ports); 45812636ac3cSMarcin Wojtas } else { 45822636ac3cSMarcin Wojtas mvneta_stop_dev(pp); 45832636ac3cSMarcin Wojtas mvneta_mdio_remove(pp); 45842636ac3cSMarcin Wojtas free_irq(dev->irq, pp); 45852636ac3cSMarcin Wojtas } 45862636ac3cSMarcin Wojtas 4587c5aff182SThomas Petazzoni mvneta_cleanup_rxqs(pp); 4588c5aff182SThomas Petazzoni mvneta_cleanup_txqs(pp); 4589c5aff182SThomas Petazzoni 4590c5aff182SThomas Petazzoni return 0; 4591c5aff182SThomas Petazzoni } 4592c5aff182SThomas Petazzoni 459315f59456SThomas Petazzoni static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 459415f59456SThomas Petazzoni { 4595503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(dev); 459615f59456SThomas Petazzoni 4597503f9aa9SRussell King return phylink_mii_ioctl(pp->phylink, ifr, cmd); 459815f59456SThomas Petazzoni } 459915f59456SThomas Petazzoni 46000db51da7SLorenzo Bianconi static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, 46010db51da7SLorenzo Bianconi struct netlink_ext_ack *extack) 46020db51da7SLorenzo Bianconi { 46030db51da7SLorenzo Bianconi bool need_update, running = netif_running(dev); 46040db51da7SLorenzo Bianconi struct mvneta_port *pp = netdev_priv(dev); 46050db51da7SLorenzo Bianconi struct bpf_prog *old_prog; 46060db51da7SLorenzo Bianconi 4607e121d270SLorenzo Bianconi if (prog && !prog->aux->xdp_has_frags && 4608e121d270SLorenzo Bianconi dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4609e121d270SLorenzo Bianconi NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags"); 46100db51da7SLorenzo Bianconi return -EOPNOTSUPP; 46110db51da7SLorenzo Bianconi } 46120db51da7SLorenzo Bianconi 461379572c98SSven Auhagen if (pp->bm_priv) { 461479572c98SSven Auhagen NL_SET_ERR_MSG_MOD(extack, 461579572c98SSven Auhagen "Hardware Buffer Management not supported on XDP"); 461679572c98SSven Auhagen return -EOPNOTSUPP; 461779572c98SSven Auhagen } 461879572c98SSven Auhagen 46190db51da7SLorenzo Bianconi need_update = !!pp->xdp_prog != !!prog; 46200db51da7SLorenzo Bianconi if (running && need_update) 46210db51da7SLorenzo Bianconi mvneta_stop(dev); 46220db51da7SLorenzo Bianconi 46230db51da7SLorenzo Bianconi old_prog = xchg(&pp->xdp_prog, prog); 46240db51da7SLorenzo Bianconi if (old_prog) 46250db51da7SLorenzo Bianconi bpf_prog_put(old_prog); 46260db51da7SLorenzo Bianconi 46270db51da7SLorenzo Bianconi if (running && need_update) 46280db51da7SLorenzo Bianconi return mvneta_open(dev); 46290db51da7SLorenzo Bianconi 46300db51da7SLorenzo Bianconi return 0; 46310db51da7SLorenzo Bianconi } 46320db51da7SLorenzo Bianconi 46330db51da7SLorenzo Bianconi static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) 46340db51da7SLorenzo Bianconi { 46350db51da7SLorenzo Bianconi switch (xdp->command) { 46360db51da7SLorenzo 
Bianconi case XDP_SETUP_PROG: 46370db51da7SLorenzo Bianconi return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); 46380db51da7SLorenzo Bianconi default: 46390db51da7SLorenzo Bianconi return -EINVAL; 46400db51da7SLorenzo Bianconi } 46410db51da7SLorenzo Bianconi } 46420db51da7SLorenzo Bianconi 4643c5aff182SThomas Petazzoni /* Ethtool methods */ 4644c5aff182SThomas Petazzoni 4645013ad40dSPhilippe Reynes /* Set link ksettings (phy address, speed) for ethtools */ 46462dc0d2b4SBaoyou Xie static int 46472dc0d2b4SBaoyou Xie mvneta_ethtool_set_link_ksettings(struct net_device *ndev, 4648013ad40dSPhilippe Reynes const struct ethtool_link_ksettings *cmd) 4649c5aff182SThomas Petazzoni { 4650013ad40dSPhilippe Reynes struct mvneta_port *pp = netdev_priv(ndev); 4651c5aff182SThomas Petazzoni 4652503f9aa9SRussell King return phylink_ethtool_ksettings_set(pp->phylink, cmd); 46530c0744fcSStas Sergeev } 46540c0744fcSStas Sergeev 4655503f9aa9SRussell King /* Get link ksettings for ethtools */ 4656503f9aa9SRussell King static int 4657503f9aa9SRussell King mvneta_ethtool_get_link_ksettings(struct net_device *ndev, 4658503f9aa9SRussell King struct ethtool_link_ksettings *cmd) 4659503f9aa9SRussell King { 4660503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(ndev); 46610c0744fcSStas Sergeev 4662503f9aa9SRussell King return phylink_ethtool_ksettings_get(pp->phylink, cmd); 46630c0744fcSStas Sergeev } 46640c0744fcSStas Sergeev 4665503f9aa9SRussell King static int mvneta_ethtool_nway_reset(struct net_device *dev) 4666503f9aa9SRussell King { 4667503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(dev); 4668503f9aa9SRussell King 4669503f9aa9SRussell King return phylink_ethtool_nway_reset(pp->phylink); 4670c5aff182SThomas Petazzoni } 4671c5aff182SThomas Petazzoni 4672c5aff182SThomas Petazzoni /* Set interrupt coalescing for ethtools */ 4673f3ccfda1SYufeng Mo static int 4674f3ccfda1SYufeng Mo mvneta_ethtool_set_coalesce(struct net_device *dev, 4675f3ccfda1SYufeng Mo struct ethtool_coalesce *c, 4676f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 4677f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 4678c5aff182SThomas Petazzoni { 4679c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 4680c5aff182SThomas Petazzoni int queue; 4681c5aff182SThomas Petazzoni 4682c5aff182SThomas Petazzoni for (queue = 0; queue < rxq_number; queue++) { 4683c5aff182SThomas Petazzoni struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4684c5aff182SThomas Petazzoni rxq->time_coal = c->rx_coalesce_usecs; 4685c5aff182SThomas Petazzoni rxq->pkts_coal = c->rx_max_coalesced_frames; 4686c5aff182SThomas Petazzoni mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 4687c5aff182SThomas Petazzoni mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 4688c5aff182SThomas Petazzoni } 4689c5aff182SThomas Petazzoni 4690c5aff182SThomas Petazzoni for (queue = 0; queue < txq_number; queue++) { 4691c5aff182SThomas Petazzoni struct mvneta_tx_queue *txq = &pp->txqs[queue]; 4692c5aff182SThomas Petazzoni txq->done_pkts_coal = c->tx_max_coalesced_frames; 4693c5aff182SThomas Petazzoni mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 4694c5aff182SThomas Petazzoni } 4695c5aff182SThomas Petazzoni 4696c5aff182SThomas Petazzoni return 0; 4697c5aff182SThomas Petazzoni } 4698c5aff182SThomas Petazzoni 4699c5aff182SThomas Petazzoni /* get coalescing for ethtools */ 4700f3ccfda1SYufeng Mo static int 4701f3ccfda1SYufeng Mo mvneta_ethtool_get_coalesce(struct net_device *dev, 4702f3ccfda1SYufeng Mo struct ethtool_coalesce *c, 
4703f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 4704f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 4705c5aff182SThomas Petazzoni { 4706c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 4707c5aff182SThomas Petazzoni 4708c5aff182SThomas Petazzoni c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 4709c5aff182SThomas Petazzoni c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 4710c5aff182SThomas Petazzoni 4711c5aff182SThomas Petazzoni c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 4712c5aff182SThomas Petazzoni return 0; 4713c5aff182SThomas Petazzoni } 4714c5aff182SThomas Petazzoni 4715c5aff182SThomas Petazzoni 4716c5aff182SThomas Petazzoni static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 4717c5aff182SThomas Petazzoni struct ethtool_drvinfo *drvinfo) 4718c5aff182SThomas Petazzoni { 4719f029c781SWolfram Sang strscpy(drvinfo->driver, MVNETA_DRIVER_NAME, 4720c5aff182SThomas Petazzoni sizeof(drvinfo->driver)); 4721f029c781SWolfram Sang strscpy(drvinfo->version, MVNETA_DRIVER_VERSION, 4722c5aff182SThomas Petazzoni sizeof(drvinfo->version)); 4723f029c781SWolfram Sang strscpy(drvinfo->bus_info, dev_name(&dev->dev), 4724c5aff182SThomas Petazzoni sizeof(drvinfo->bus_info)); 4725c5aff182SThomas Petazzoni } 4726c5aff182SThomas Petazzoni 4727c5aff182SThomas Petazzoni 472874624944SHao Chen static void 472974624944SHao Chen mvneta_ethtool_get_ringparam(struct net_device *netdev, 473074624944SHao Chen struct ethtool_ringparam *ring, 473174624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring, 473274624944SHao Chen struct netlink_ext_ack *extack) 4733c5aff182SThomas Petazzoni { 4734c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(netdev); 4735c5aff182SThomas Petazzoni 4736c5aff182SThomas Petazzoni ring->rx_max_pending = MVNETA_MAX_RXD; 4737c5aff182SThomas Petazzoni ring->tx_max_pending = MVNETA_MAX_TXD; 4738c5aff182SThomas Petazzoni ring->rx_pending = pp->rx_ring_size; 4739c5aff182SThomas Petazzoni ring->tx_pending = pp->tx_ring_size; 4740c5aff182SThomas Petazzoni } 4741c5aff182SThomas Petazzoni 474274624944SHao Chen static int 474374624944SHao Chen mvneta_ethtool_set_ringparam(struct net_device *dev, 474474624944SHao Chen struct ethtool_ringparam *ring, 474574624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring, 474674624944SHao Chen struct netlink_ext_ack *extack) 4747c5aff182SThomas Petazzoni { 4748c5aff182SThomas Petazzoni struct mvneta_port *pp = netdev_priv(dev); 4749c5aff182SThomas Petazzoni 4750c5aff182SThomas Petazzoni if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 4751c5aff182SThomas Petazzoni return -EINVAL; 4752c5aff182SThomas Petazzoni pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
4753c5aff182SThomas Petazzoni ring->rx_pending : MVNETA_MAX_RXD; 47548eef5f97SEzequiel Garcia 47558eef5f97SEzequiel Garcia pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 47568eef5f97SEzequiel Garcia MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 47578eef5f97SEzequiel Garcia if (pp->tx_ring_size != ring->tx_pending) 47588eef5f97SEzequiel Garcia netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 47598eef5f97SEzequiel Garcia pp->tx_ring_size, ring->tx_pending); 4760c5aff182SThomas Petazzoni 4761c5aff182SThomas Petazzoni if (netif_running(dev)) { 4762c5aff182SThomas Petazzoni mvneta_stop(dev); 4763c5aff182SThomas Petazzoni if (mvneta_open(dev)) { 4764c5aff182SThomas Petazzoni netdev_err(dev, 4765c5aff182SThomas Petazzoni "error on opening device after ring param change\n"); 4766c5aff182SThomas Petazzoni return -ENOMEM; 4767c5aff182SThomas Petazzoni } 4768c5aff182SThomas Petazzoni } 4769c5aff182SThomas Petazzoni 4770c5aff182SThomas Petazzoni return 0; 4771c5aff182SThomas Petazzoni } 4772c5aff182SThomas Petazzoni 47734932a918SRussell King static void mvneta_ethtool_get_pauseparam(struct net_device *dev, 47744932a918SRussell King struct ethtool_pauseparam *pause) 47754932a918SRussell King { 47764932a918SRussell King struct mvneta_port *pp = netdev_priv(dev); 47774932a918SRussell King 47784932a918SRussell King phylink_ethtool_get_pauseparam(pp->phylink, pause); 47794932a918SRussell King } 47804932a918SRussell King 47814932a918SRussell King static int mvneta_ethtool_set_pauseparam(struct net_device *dev, 47824932a918SRussell King struct ethtool_pauseparam *pause) 47834932a918SRussell King { 47844932a918SRussell King struct mvneta_port *pp = netdev_priv(dev); 47854932a918SRussell King 47864932a918SRussell King return phylink_ethtool_set_pauseparam(pp->phylink, pause); 47874932a918SRussell King } 47884932a918SRussell King 47899b0cdefaSRussell King static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 47909b0cdefaSRussell King u8 *data) 47919b0cdefaSRussell King { 47929b0cdefaSRussell King if (sset == ETH_SS_STATS) { 47939b0cdefaSRussell King int i; 47949b0cdefaSRussell King 47959b0cdefaSRussell King for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 47969b0cdefaSRussell King memcpy(data + i * ETH_GSTRING_LEN, 47979b0cdefaSRussell King mvneta_statistics[i].name, ETH_GSTRING_LEN); 4798b3fc7922SLorenzo Bianconi 4799b3fc7922SLorenzo Bianconi data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); 4800b3fc7922SLorenzo Bianconi page_pool_ethtool_stats_get_strings(data); 48019b0cdefaSRussell King } 48029b0cdefaSRussell King } 48039b0cdefaSRussell King 48049ac41f3cSLorenzo Bianconi static void 48059ac41f3cSLorenzo Bianconi mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, 48069ac41f3cSLorenzo Bianconi struct mvneta_ethtool_stats *es) 48079ac41f3cSLorenzo Bianconi { 48089ac41f3cSLorenzo Bianconi unsigned int start; 48099ac41f3cSLorenzo Bianconi int cpu; 48109ac41f3cSLorenzo Bianconi 48119ac41f3cSLorenzo Bianconi for_each_possible_cpu(cpu) { 48129ac41f3cSLorenzo Bianconi struct mvneta_pcpu_stats *stats; 48139ac41f3cSLorenzo Bianconi u64 skb_alloc_error; 48149ac41f3cSLorenzo Bianconi u64 refill_error; 48153d866523SLorenzo Bianconi u64 xdp_redirect; 481615070919SJesper Dangaard Brouer u64 xdp_xmit_err; 481715070919SJesper Dangaard Brouer u64 xdp_tx_err; 48183d866523SLorenzo Bianconi u64 xdp_pass; 48193d866523SLorenzo Bianconi u64 xdp_drop; 48207d51a015SLorenzo Bianconi u64 xdp_xmit; 48213d866523SLorenzo Bianconi u64 xdp_tx; 48229ac41f3cSLorenzo Bianconi 
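/* Snapshot this CPU's counters inside the u64_stats sequence loop below;
 * the read is retried if a writer updated the counters meanwhile, so the
 * sampled values stay consistent with each other.
 */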
48239ac41f3cSLorenzo Bianconi stats = per_cpu_ptr(pp->stats, cpu); 48249ac41f3cSLorenzo Bianconi do { 4825068c38adSThomas Gleixner start = u64_stats_fetch_begin(&stats->syncp); 48269ac41f3cSLorenzo Bianconi skb_alloc_error = stats->es.skb_alloc_error; 48279ac41f3cSLorenzo Bianconi refill_error = stats->es.refill_error; 48283d866523SLorenzo Bianconi xdp_redirect = stats->es.ps.xdp_redirect; 48293d866523SLorenzo Bianconi xdp_pass = stats->es.ps.xdp_pass; 48303d866523SLorenzo Bianconi xdp_drop = stats->es.ps.xdp_drop; 48317d51a015SLorenzo Bianconi xdp_xmit = stats->es.ps.xdp_xmit; 483215070919SJesper Dangaard Brouer xdp_xmit_err = stats->es.ps.xdp_xmit_err; 48333d866523SLorenzo Bianconi xdp_tx = stats->es.ps.xdp_tx; 483415070919SJesper Dangaard Brouer xdp_tx_err = stats->es.ps.xdp_tx_err; 4835068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&stats->syncp, start)); 48369ac41f3cSLorenzo Bianconi 48379ac41f3cSLorenzo Bianconi es->skb_alloc_error += skb_alloc_error; 48389ac41f3cSLorenzo Bianconi es->refill_error += refill_error; 48393d866523SLorenzo Bianconi es->ps.xdp_redirect += xdp_redirect; 48403d866523SLorenzo Bianconi es->ps.xdp_pass += xdp_pass; 48413d866523SLorenzo Bianconi es->ps.xdp_drop += xdp_drop; 48427d51a015SLorenzo Bianconi es->ps.xdp_xmit += xdp_xmit; 484315070919SJesper Dangaard Brouer es->ps.xdp_xmit_err += xdp_xmit_err; 48443d866523SLorenzo Bianconi es->ps.xdp_tx += xdp_tx; 484515070919SJesper Dangaard Brouer es->ps.xdp_tx_err += xdp_tx_err; 48469ac41f3cSLorenzo Bianconi } 48479ac41f3cSLorenzo Bianconi } 48489ac41f3cSLorenzo Bianconi 48499b0cdefaSRussell King static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 48509b0cdefaSRussell King { 48519ac41f3cSLorenzo Bianconi struct mvneta_ethtool_stats stats = {}; 48529b0cdefaSRussell King const struct mvneta_statistic *s; 48539b0cdefaSRussell King void __iomem *base = pp->base; 48546d81f451SRussell King u32 high, low; 48556d81f451SRussell King u64 val; 48569b0cdefaSRussell King int i; 48579b0cdefaSRussell King 48589ac41f3cSLorenzo Bianconi mvneta_ethtool_update_pcpu_stats(pp, &stats); 48599b0cdefaSRussell King for (i = 0, s = mvneta_statistics; 48609b0cdefaSRussell King s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 48619b0cdefaSRussell King s++, i++) { 48629b0cdefaSRussell King switch (s->type) { 48639b0cdefaSRussell King case T_REG_32: 48649b0cdefaSRussell King val = readl_relaxed(base + s->offset); 48659ac41f3cSLorenzo Bianconi pp->ethtool_stats[i] += val; 48669b0cdefaSRussell King break; 48679b0cdefaSRussell King case T_REG_64: 48689b0cdefaSRussell King /* Docs say to read low 32-bit then high */ 48699b0cdefaSRussell King low = readl_relaxed(base + s->offset); 48709b0cdefaSRussell King high = readl_relaxed(base + s->offset + 4); 48716d81f451SRussell King val = (u64)high << 32 | low; 48729ac41f3cSLorenzo Bianconi pp->ethtool_stats[i] += val; 48736d81f451SRussell King break; 48746d81f451SRussell King case T_SW: 48756d81f451SRussell King switch (s->offset) { 48766d81f451SRussell King case ETHTOOL_STAT_EEE_WAKEUP: 48776d81f451SRussell King val = phylink_get_eee_err(pp->phylink); 48789ac41f3cSLorenzo Bianconi pp->ethtool_stats[i] += val; 48799b0cdefaSRussell King break; 488017a96da6SGregory CLEMENT case ETHTOOL_STAT_SKB_ALLOC_ERR: 48819ac41f3cSLorenzo Bianconi pp->ethtool_stats[i] = stats.skb_alloc_error; 488217a96da6SGregory CLEMENT break; 488317a96da6SGregory CLEMENT case ETHTOOL_STAT_REFILL_ERR: 48849ac41f3cSLorenzo Bianconi pp->ethtool_stats[i] = stats.refill_error; 488517a96da6SGregory CLEMENT break; 
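/* The XDP counters below are software statistics aggregated from the
 * per-CPU stats by mvneta_ethtool_update_pcpu_stats() and exposed
 * through "ethtool -S".
 */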
48863d866523SLorenzo Bianconi case ETHTOOL_XDP_REDIRECT: 48873d866523SLorenzo Bianconi pp->ethtool_stats[i] = stats.ps.xdp_redirect; 48883d866523SLorenzo Bianconi break; 48893d866523SLorenzo Bianconi case ETHTOOL_XDP_PASS: 48903d866523SLorenzo Bianconi pp->ethtool_stats[i] = stats.ps.xdp_pass; 48913d866523SLorenzo Bianconi break; 48923d866523SLorenzo Bianconi case ETHTOOL_XDP_DROP: 48933d866523SLorenzo Bianconi pp->ethtool_stats[i] = stats.ps.xdp_drop; 48943d866523SLorenzo Bianconi break; 48953d866523SLorenzo Bianconi case ETHTOOL_XDP_TX: 48963d866523SLorenzo Bianconi pp->ethtool_stats[i] = stats.ps.xdp_tx; 48973d866523SLorenzo Bianconi break; 489815070919SJesper Dangaard Brouer case ETHTOOL_XDP_TX_ERR: 489915070919SJesper Dangaard Brouer pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 490015070919SJesper Dangaard Brouer break; 49017d51a015SLorenzo Bianconi case ETHTOOL_XDP_XMIT: 49027d51a015SLorenzo Bianconi pp->ethtool_stats[i] = stats.ps.xdp_xmit; 49037d51a015SLorenzo Bianconi break; 490415070919SJesper Dangaard Brouer case ETHTOOL_XDP_XMIT_ERR: 490515070919SJesper Dangaard Brouer pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 490615070919SJesper Dangaard Brouer break; 49079b0cdefaSRussell King } 49086d81f451SRussell King break; 49096d81f451SRussell King } 49109b0cdefaSRussell King } 49119b0cdefaSRussell King } 49129b0cdefaSRussell King 4913b3fc7922SLorenzo Bianconi static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) 4914b3fc7922SLorenzo Bianconi { 4915b3fc7922SLorenzo Bianconi struct page_pool_stats stats = {}; 4916b3fc7922SLorenzo Bianconi int i; 4917b3fc7922SLorenzo Bianconi 4918b3fc7922SLorenzo Bianconi for (i = 0; i < rxq_number; i++) 4919b3fc7922SLorenzo Bianconi page_pool_get_stats(pp->rxqs[i].page_pool, &stats); 4920b3fc7922SLorenzo Bianconi 4921b3fc7922SLorenzo Bianconi page_pool_ethtool_stats_get(data, &stats); 4922b3fc7922SLorenzo Bianconi } 4923b3fc7922SLorenzo Bianconi 49249b0cdefaSRussell King static void mvneta_ethtool_get_stats(struct net_device *dev, 49259b0cdefaSRussell King struct ethtool_stats *stats, u64 *data) 49269b0cdefaSRussell King { 49279b0cdefaSRussell King struct mvneta_port *pp = netdev_priv(dev); 49289b0cdefaSRussell King int i; 49299b0cdefaSRussell King 49309b0cdefaSRussell King mvneta_ethtool_update_stats(pp); 49319b0cdefaSRussell King 49329b0cdefaSRussell King for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 49339b0cdefaSRussell King *data++ = pp->ethtool_stats[i]; 4934b3fc7922SLorenzo Bianconi 4935b3fc7922SLorenzo Bianconi mvneta_ethtool_pp_stats(pp, data); 49369b0cdefaSRussell King } 49379b0cdefaSRussell King 49389b0cdefaSRussell King static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) 49399b0cdefaSRussell King { 49409b0cdefaSRussell King if (sset == ETH_SS_STATS) 4941b3fc7922SLorenzo Bianconi return ARRAY_SIZE(mvneta_statistics) + 4942b3fc7922SLorenzo Bianconi page_pool_ethtool_stats_get_count(); 4943b3fc7922SLorenzo Bianconi 49449b0cdefaSRussell King return -EOPNOTSUPP; 49459b0cdefaSRussell King } 49469b0cdefaSRussell King 49479a401deaSGregory CLEMENT static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) 49489a401deaSGregory CLEMENT { 49499a401deaSGregory CLEMENT return MVNETA_RSS_LU_TABLE_SIZE; 49509a401deaSGregory CLEMENT } 49519a401deaSGregory CLEMENT 49529a401deaSGregory CLEMENT static int mvneta_ethtool_get_rxnfc(struct net_device *dev, 49539a401deaSGregory CLEMENT struct ethtool_rxnfc *info, 49549a401deaSGregory CLEMENT u32 *rules __always_unused) 49559a401deaSGregory CLEMENT { 
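/* Only the RX ring count is reported here; RX flow hash configuration
 * (ETHTOOL_GRXFH) and all other queries are rejected below.
 */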
49569a401deaSGregory CLEMENT switch (info->cmd) { 49579a401deaSGregory CLEMENT case ETHTOOL_GRXRINGS: 49589a401deaSGregory CLEMENT info->data = rxq_number; 49599a401deaSGregory CLEMENT return 0; 49609a401deaSGregory CLEMENT case ETHTOOL_GRXFH: 49619a401deaSGregory CLEMENT return -EOPNOTSUPP; 49629a401deaSGregory CLEMENT default: 49639a401deaSGregory CLEMENT return -EOPNOTSUPP; 49649a401deaSGregory CLEMENT } 49659a401deaSGregory CLEMENT } 49669a401deaSGregory CLEMENT 49679a401deaSGregory CLEMENT static int mvneta_config_rss(struct mvneta_port *pp) 49689a401deaSGregory CLEMENT { 49699a401deaSGregory CLEMENT int cpu; 49709a401deaSGregory CLEMENT u32 val; 49719a401deaSGregory CLEMENT 49729a401deaSGregory CLEMENT netif_tx_stop_all_queues(pp->dev); 49739a401deaSGregory CLEMENT 49746b125d63SGregory CLEMENT on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 49759a401deaSGregory CLEMENT 49760f5c6c30SJisheng Zhang if (!pp->neta_armada3700) { 49779a401deaSGregory CLEMENT /* We have to synchronise on the napi of each CPU */ 49789a401deaSGregory CLEMENT for_each_online_cpu(cpu) { 49799a401deaSGregory CLEMENT struct mvneta_pcpu_port *pcpu_port = 49809a401deaSGregory CLEMENT per_cpu_ptr(pp->ports, cpu); 49819a401deaSGregory CLEMENT 49829a401deaSGregory CLEMENT napi_synchronize(&pcpu_port->napi); 49839a401deaSGregory CLEMENT napi_disable(&pcpu_port->napi); 49849a401deaSGregory CLEMENT } 49850f5c6c30SJisheng Zhang } else { 49860f5c6c30SJisheng Zhang napi_synchronize(&pp->napi); 49870f5c6c30SJisheng Zhang napi_disable(&pp->napi); 49880f5c6c30SJisheng Zhang } 49899a401deaSGregory CLEMENT 49909a401deaSGregory CLEMENT pp->rxq_def = pp->indir[0]; 49919a401deaSGregory CLEMENT 49929a401deaSGregory CLEMENT /* Update unicast mapping */ 49939a401deaSGregory CLEMENT mvneta_set_rx_mode(pp->dev); 49949a401deaSGregory CLEMENT 49959a401deaSGregory CLEMENT /* Update val of portCfg register accordingly with all RxQueue types */ 49969a401deaSGregory CLEMENT val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 49979a401deaSGregory CLEMENT mvreg_write(pp, MVNETA_PORT_CONFIG, val); 49989a401deaSGregory CLEMENT 49999a401deaSGregory CLEMENT /* Update the elected CPU matching the new rxq_def */ 5000120cfa50SGregory CLEMENT spin_lock(&pp->lock); 50019a401deaSGregory CLEMENT mvneta_percpu_elect(pp); 5002120cfa50SGregory CLEMENT spin_unlock(&pp->lock); 50039a401deaSGregory CLEMENT 50040f5c6c30SJisheng Zhang if (!pp->neta_armada3700) { 50059a401deaSGregory CLEMENT /* We have to synchronise on the napi of each CPU */ 50069a401deaSGregory CLEMENT for_each_online_cpu(cpu) { 50079a401deaSGregory CLEMENT struct mvneta_pcpu_port *pcpu_port = 50089a401deaSGregory CLEMENT per_cpu_ptr(pp->ports, cpu); 50099a401deaSGregory CLEMENT 50109a401deaSGregory CLEMENT napi_enable(&pcpu_port->napi); 50119a401deaSGregory CLEMENT } 50120f5c6c30SJisheng Zhang } else { 50130f5c6c30SJisheng Zhang napi_enable(&pp->napi); 50140f5c6c30SJisheng Zhang } 50159a401deaSGregory CLEMENT 50169a401deaSGregory CLEMENT netif_tx_start_all_queues(pp->dev); 50179a401deaSGregory CLEMENT 50189a401deaSGregory CLEMENT return 0; 50199a401deaSGregory CLEMENT } 50209a401deaSGregory CLEMENT 50219a401deaSGregory CLEMENT static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 50229a401deaSGregory CLEMENT const u8 *key, const u8 hfunc) 50239a401deaSGregory CLEMENT { 50249a401deaSGregory CLEMENT struct mvneta_port *pp = netdev_priv(dev); 50252636ac3cSMarcin Wojtas 50262636ac3cSMarcin Wojtas /* Current code for Armada 3700 doesn't support RSS features yet */ 
50272636ac3cSMarcin Wojtas if (pp->neta_armada3700) 50282636ac3cSMarcin Wojtas return -EOPNOTSUPP; 50292636ac3cSMarcin Wojtas 50309a401deaSGregory CLEMENT /* We require at least one supported parameter to be changed 50319a401deaSGregory CLEMENT * and no change in any of the unsupported parameters 50329a401deaSGregory CLEMENT */ 50339a401deaSGregory CLEMENT if (key || 50349a401deaSGregory CLEMENT (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 50359a401deaSGregory CLEMENT return -EOPNOTSUPP; 50369a401deaSGregory CLEMENT 50379a401deaSGregory CLEMENT if (!indir) 50389a401deaSGregory CLEMENT return 0; 50399a401deaSGregory CLEMENT 50409a401deaSGregory CLEMENT memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); 50419a401deaSGregory CLEMENT 50429a401deaSGregory CLEMENT return mvneta_config_rss(pp); 50439a401deaSGregory CLEMENT } 50449a401deaSGregory CLEMENT 50459a401deaSGregory CLEMENT static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 50469a401deaSGregory CLEMENT u8 *hfunc) 50479a401deaSGregory CLEMENT { 50489a401deaSGregory CLEMENT struct mvneta_port *pp = netdev_priv(dev); 50499a401deaSGregory CLEMENT 50502636ac3cSMarcin Wojtas /* Current code for Armada 3700 doesn't support RSS features yet */ 50512636ac3cSMarcin Wojtas if (pp->neta_armada3700) 50522636ac3cSMarcin Wojtas return -EOPNOTSUPP; 50532636ac3cSMarcin Wojtas 50549a401deaSGregory CLEMENT if (hfunc) 50559a401deaSGregory CLEMENT *hfunc = ETH_RSS_HASH_TOP; 50569a401deaSGregory CLEMENT 50579a401deaSGregory CLEMENT if (!indir) 50589a401deaSGregory CLEMENT return 0; 50599a401deaSGregory CLEMENT 50609a401deaSGregory CLEMENT memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); 50619a401deaSGregory CLEMENT 50629a401deaSGregory CLEMENT return 0; 50639a401deaSGregory CLEMENT } 50649a401deaSGregory CLEMENT 5065b60a00f9SJingju Hou static void mvneta_ethtool_get_wol(struct net_device *dev, 5066b60a00f9SJingju Hou struct ethtool_wolinfo *wol) 5067b60a00f9SJingju Hou { 5068503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(dev); 5069b60a00f9SJingju Hou 5070503f9aa9SRussell King phylink_ethtool_get_wol(pp->phylink, wol); 5071b60a00f9SJingju Hou } 5072b60a00f9SJingju Hou 5073b60a00f9SJingju Hou static int mvneta_ethtool_set_wol(struct net_device *dev, 5074b60a00f9SJingju Hou struct ethtool_wolinfo *wol) 5075b60a00f9SJingju Hou { 5076503f9aa9SRussell King struct mvneta_port *pp = netdev_priv(dev); 507782960fffSJisheng Zhang int ret; 507882960fffSJisheng Zhang 5079503f9aa9SRussell King ret = phylink_ethtool_set_wol(pp->phylink, wol); 508082960fffSJisheng Zhang if (!ret) 508182960fffSJisheng Zhang device_set_wakeup_enable(&dev->dev, !!wol->wolopts); 508282960fffSJisheng Zhang 508382960fffSJisheng Zhang return ret; 5084b60a00f9SJingju Hou } 5085b60a00f9SJingju Hou 50866d81f451SRussell King static int mvneta_ethtool_get_eee(struct net_device *dev, 50876d81f451SRussell King struct ethtool_eee *eee) 50886d81f451SRussell King { 50896d81f451SRussell King struct mvneta_port *pp = netdev_priv(dev); 50906d81f451SRussell King u32 lpi_ctl0; 50916d81f451SRussell King 50926d81f451SRussell King lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 50936d81f451SRussell King 50946d81f451SRussell King eee->eee_enabled = pp->eee_enabled; 50956d81f451SRussell King eee->eee_active = pp->eee_active; 50966d81f451SRussell King eee->tx_lpi_enabled = pp->tx_lpi_enabled; 50976d81f451SRussell King eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; 50986d81f451SRussell King 50996d81f451SRussell King return 
phylink_ethtool_get_eee(pp->phylink, eee); 51006d81f451SRussell King } 51016d81f451SRussell King 51026d81f451SRussell King static int mvneta_ethtool_set_eee(struct net_device *dev, 51036d81f451SRussell King struct ethtool_eee *eee) 51046d81f451SRussell King { 51056d81f451SRussell King struct mvneta_port *pp = netdev_priv(dev); 51066d81f451SRussell King u32 lpi_ctl0; 51076d81f451SRussell King 51086d81f451SRussell King /* The Armada 37x documents do not give limits for this other than 5109df4a17a9SYangyang Li * it being an 8-bit register. 5110df4a17a9SYangyang Li */ 5111e4a3e9ffSYueHaibing if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) 51126d81f451SRussell King return -EINVAL; 51136d81f451SRussell King 51146d81f451SRussell King lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 51156d81f451SRussell King lpi_ctl0 &= ~(0xff << 8); 51166d81f451SRussell King lpi_ctl0 |= eee->tx_lpi_timer << 8; 51176d81f451SRussell King mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); 51186d81f451SRussell King 51196d81f451SRussell King pp->eee_enabled = eee->eee_enabled; 51206d81f451SRussell King pp->tx_lpi_enabled = eee->tx_lpi_enabled; 51216d81f451SRussell King 51226d81f451SRussell King mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); 51236d81f451SRussell King 51246d81f451SRussell King return phylink_ethtool_set_eee(pp->phylink, eee); 51256d81f451SRussell King } 51266d81f451SRussell King 51274906887aSMaxime Chevallier static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) 51284906887aSMaxime Chevallier { 51294906887aSMaxime Chevallier mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); 51304906887aSMaxime Chevallier } 51314906887aSMaxime Chevallier 5132e9f7099dSMaxime Chevallier static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) 51334906887aSMaxime Chevallier { 5134e9f7099dSMaxime Chevallier u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); 51354906887aSMaxime Chevallier 5136e9f7099dSMaxime Chevallier val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); 5137e9f7099dSMaxime Chevallier val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); 51384906887aSMaxime Chevallier 51394906887aSMaxime Chevallier mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); 51404906887aSMaxime Chevallier } 51414906887aSMaxime Chevallier 51422551dc9eSMaxime Chevallier static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) 51432551dc9eSMaxime Chevallier { 51442551dc9eSMaxime Chevallier unsigned long core_clk_rate; 51452551dc9eSMaxime Chevallier u32 refill_cycles; 51462551dc9eSMaxime Chevallier u32 val; 51472551dc9eSMaxime Chevallier 51482551dc9eSMaxime Chevallier core_clk_rate = clk_get_rate(pp->clk); 51492551dc9eSMaxime Chevallier if (!core_clk_rate) 51502551dc9eSMaxime Chevallier return -EINVAL; 51512551dc9eSMaxime Chevallier 51522551dc9eSMaxime Chevallier refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / 51532551dc9eSMaxime Chevallier (NSEC_PER_SEC / core_clk_rate); 51542551dc9eSMaxime Chevallier 51552551dc9eSMaxime Chevallier if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) 51562551dc9eSMaxime Chevallier return -EINVAL; 51572551dc9eSMaxime Chevallier 51582551dc9eSMaxime Chevallier /* Enable bw limit algorithm version 3 */ 51592551dc9eSMaxime Chevallier val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 51602551dc9eSMaxime Chevallier val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 51612551dc9eSMaxime Chevallier mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 51622551dc9eSMaxime Chevallier 51632551dc9eSMaxime Chevallier /* Set the base refill rate */ 51642551dc9eSMaxime Chevallier 
mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); 51652551dc9eSMaxime Chevallier 51662551dc9eSMaxime Chevallier return 0; 51672551dc9eSMaxime Chevallier } 51682551dc9eSMaxime Chevallier 51692551dc9eSMaxime Chevallier static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) 51702551dc9eSMaxime Chevallier { 51712551dc9eSMaxime Chevallier u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 51722551dc9eSMaxime Chevallier 51732551dc9eSMaxime Chevallier val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 51742551dc9eSMaxime Chevallier mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 51752551dc9eSMaxime Chevallier } 51762551dc9eSMaxime Chevallier 51772551dc9eSMaxime Chevallier static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, 51782551dc9eSMaxime Chevallier u64 min_rate, u64 max_rate) 51792551dc9eSMaxime Chevallier { 51802551dc9eSMaxime Chevallier u32 refill_val, rem; 51812551dc9eSMaxime Chevallier u32 val = 0; 51822551dc9eSMaxime Chevallier 51832551dc9eSMaxime Chevallier /* Convert to from Bps to bps */ 51842551dc9eSMaxime Chevallier max_rate *= 8; 51852551dc9eSMaxime Chevallier 51862551dc9eSMaxime Chevallier if (min_rate) 51872551dc9eSMaxime Chevallier return -EINVAL; 51882551dc9eSMaxime Chevallier 51892551dc9eSMaxime Chevallier refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, 51902551dc9eSMaxime Chevallier &rem); 51912551dc9eSMaxime Chevallier 51922551dc9eSMaxime Chevallier if (rem || !refill_val || 51932551dc9eSMaxime Chevallier refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) 51942551dc9eSMaxime Chevallier return -EINVAL; 51952551dc9eSMaxime Chevallier 51962551dc9eSMaxime Chevallier val = refill_val; 51972551dc9eSMaxime Chevallier val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << 51982551dc9eSMaxime Chevallier MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); 51992551dc9eSMaxime Chevallier 52002551dc9eSMaxime Chevallier mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); 52012551dc9eSMaxime Chevallier 52022551dc9eSMaxime Chevallier return 0; 52032551dc9eSMaxime Chevallier } 52042551dc9eSMaxime Chevallier 52054906887aSMaxime Chevallier static int mvneta_setup_mqprio(struct net_device *dev, 520675fa71e3SMaxime Chevallier struct tc_mqprio_qopt_offload *mqprio) 52074906887aSMaxime Chevallier { 52084906887aSMaxime Chevallier struct mvneta_port *pp = netdev_priv(dev); 52092551dc9eSMaxime Chevallier int rxq, txq, tc, ret; 52104906887aSMaxime Chevallier u8 num_tc; 52114906887aSMaxime Chevallier 5212e7ca75feSMaxime Chevallier if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) 5213e7ca75feSMaxime Chevallier return 0; 5214e7ca75feSMaxime Chevallier 521575fa71e3SMaxime Chevallier num_tc = mqprio->qopt.num_tc; 52164906887aSMaxime Chevallier 52174906887aSMaxime Chevallier if (num_tc > rxq_number) 52184906887aSMaxime Chevallier return -EINVAL; 52194906887aSMaxime Chevallier 52204906887aSMaxime Chevallier mvneta_clear_rx_prio_map(pp); 5221e9f7099dSMaxime Chevallier 5222e9f7099dSMaxime Chevallier if (!num_tc) { 52232551dc9eSMaxime Chevallier mvneta_disable_per_queue_rate_limit(pp); 52244906887aSMaxime Chevallier netdev_reset_tc(dev); 52254906887aSMaxime Chevallier return 0; 52264906887aSMaxime Chevallier } 52274906887aSMaxime Chevallier 522875fa71e3SMaxime Chevallier netdev_set_num_tc(dev, mqprio->qopt.num_tc); 5229e9f7099dSMaxime Chevallier 5230e9f7099dSMaxime Chevallier for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 5231e9f7099dSMaxime Chevallier netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], 5232e9f7099dSMaxime Chevallier 
mqprio->qopt.offset[tc]); 5233e9f7099dSMaxime Chevallier 5234e9f7099dSMaxime Chevallier for (rxq = mqprio->qopt.offset[tc]; 5235e9f7099dSMaxime Chevallier rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 5236e9f7099dSMaxime Chevallier rxq++) { 5237e9f7099dSMaxime Chevallier if (rxq >= rxq_number) 5238e9f7099dSMaxime Chevallier return -EINVAL; 5239e9f7099dSMaxime Chevallier 5240e9f7099dSMaxime Chevallier mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); 5241e9f7099dSMaxime Chevallier } 5242e9f7099dSMaxime Chevallier } 52434906887aSMaxime Chevallier 52442551dc9eSMaxime Chevallier if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { 52452551dc9eSMaxime Chevallier mvneta_disable_per_queue_rate_limit(pp); 52462551dc9eSMaxime Chevallier return 0; 52472551dc9eSMaxime Chevallier } 52482551dc9eSMaxime Chevallier 52492551dc9eSMaxime Chevallier if (mqprio->qopt.num_tc > txq_number) 52502551dc9eSMaxime Chevallier return -EINVAL; 52512551dc9eSMaxime Chevallier 52522551dc9eSMaxime Chevallier ret = mvneta_enable_per_queue_rate_limit(pp); 52532551dc9eSMaxime Chevallier if (ret) 52542551dc9eSMaxime Chevallier return ret; 52552551dc9eSMaxime Chevallier 52562551dc9eSMaxime Chevallier for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 52572551dc9eSMaxime Chevallier for (txq = mqprio->qopt.offset[tc]; 52582551dc9eSMaxime Chevallier txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 52592551dc9eSMaxime Chevallier txq++) { 52602551dc9eSMaxime Chevallier if (txq >= txq_number) 52612551dc9eSMaxime Chevallier return -EINVAL; 52622551dc9eSMaxime Chevallier 52632551dc9eSMaxime Chevallier ret = mvneta_setup_queue_rates(pp, txq, 52642551dc9eSMaxime Chevallier mqprio->min_rate[tc], 52652551dc9eSMaxime Chevallier mqprio->max_rate[tc]); 52662551dc9eSMaxime Chevallier if (ret) 52672551dc9eSMaxime Chevallier return ret; 52682551dc9eSMaxime Chevallier } 52692551dc9eSMaxime Chevallier } 52702551dc9eSMaxime Chevallier 52714906887aSMaxime Chevallier return 0; 52724906887aSMaxime Chevallier } 52734906887aSMaxime Chevallier 52744906887aSMaxime Chevallier static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, 52754906887aSMaxime Chevallier void *type_data) 52764906887aSMaxime Chevallier { 52774906887aSMaxime Chevallier switch (type) { 52784906887aSMaxime Chevallier case TC_SETUP_QDISC_MQPRIO: 52794906887aSMaxime Chevallier return mvneta_setup_mqprio(dev, type_data); 52804906887aSMaxime Chevallier default: 52814906887aSMaxime Chevallier return -EOPNOTSUPP; 52824906887aSMaxime Chevallier } 52834906887aSMaxime Chevallier } 52844906887aSMaxime Chevallier 5285c5aff182SThomas Petazzoni static const struct net_device_ops mvneta_netdev_ops = { 5286c5aff182SThomas Petazzoni .ndo_open = mvneta_open, 5287c5aff182SThomas Petazzoni .ndo_stop = mvneta_stop, 5288c5aff182SThomas Petazzoni .ndo_start_xmit = mvneta_tx, 5289c5aff182SThomas Petazzoni .ndo_set_rx_mode = mvneta_set_rx_mode, 5290c5aff182SThomas Petazzoni .ndo_set_mac_address = mvneta_set_mac_addr, 5291c5aff182SThomas Petazzoni .ndo_change_mtu = mvneta_change_mtu, 5292b65657fcSSimon Guinot .ndo_fix_features = mvneta_fix_features, 5293c5aff182SThomas Petazzoni .ndo_get_stats64 = mvneta_get_stats64, 5294a7605370SArnd Bergmann .ndo_eth_ioctl = mvneta_ioctl, 52950db51da7SLorenzo Bianconi .ndo_bpf = mvneta_xdp, 5296b0a43db9SLorenzo Bianconi .ndo_xdp_xmit = mvneta_xdp_xmit, 52974906887aSMaxime Chevallier .ndo_setup_tc = mvneta_setup_tc, 5298c5aff182SThomas Petazzoni }; 5299c5aff182SThomas Petazzoni 53004581be42SJisheng Zhang static const struct ethtool_ops mvneta_eth_tool_ops 
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_eth_ioctl       = mvneta_ioctl,
	.ndo_bpf             = mvneta_xdp,
	.ndo_xdp_xmit        = mvneta_xdp_xmit,
	.ndo_setup_tc        = mvneta_setup_tc,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset          = mvneta_ethtool_nway_reset,
	.get_link            = ethtool_op_get_link,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_pauseparam      = mvneta_ethtool_get_pauseparam,
	.set_pauseparam      = mvneta_ethtool_set_pauseparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
	.get_link_ksettings  = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings  = mvneta_ethtool_set_link_ksettings,
	.get_wol             = mvneta_ethtool_get_wol,
	.set_wol             = mvneta_ethtool_set_wol,
	.get_eee             = mvneta_ethtool_get_eee,
	.set_eee             = mvneta_ethtool_set_eee,
};

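/* Note on the ethtool ops above: only rx-usecs and rx/tx-frames are
 * advertised through supported_coalesce_params, so the ethtool core
 * rejects any other coalescing knob before it reaches this driver.
 * Illustrative command (the values are arbitrary examples):
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 */
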
/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}

/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		if (pp->neta_ac5)
			mvreg_write(pp, MVNETA_WIN_BASE(0),
				    (MVNETA_AC5_CNM_DDR_ATTR << 8) |
				    MVNETA_AC5_CNM_DDR_TARGET);
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}

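/* Note: only QSGMII, SGMII, the 802.3z serdes modes (1000BASE-X and
 * 2500BASE-X) and the RGMII variants pass the check above; any other
 * phy-mode coming from the device tree makes port power-up, and
 * therefore probe or resume, fail with -EINVAL.
 */
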
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	char hw_mac_addr[ETH_ALEN];
	phy_interface_t phy_mode;
	const char *mac_from;
	int tx_csum_limit;
	int err;
	int cpu;

	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;
	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->dn = dn;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		return err;
	}

	pp->phy_interface = phy_mode;

	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER))
		return -EPROBE_DEFER;

	if (IS_ERR(comphy))
		comphy = NULL;

	pp->comphy = comphy;

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base))
		return PTR_ERR(pp->base);

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;
	if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
		pp->neta_armada3700 = true;
		pp->neta_ac5 = true;
	}

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
	pp->phylink_pcs.neg_mode = true;

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;
	pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
		MAC_100 | MAC_1000FD | MAC_2500FD;

	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII,
		  pp->phylink_config.supported_interfaces);
	if (comphy) {
		/* If a COMPHY is present, we can support any of the serdes
		 * modes and switch between them.
		 */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  pp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  pp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  pp->phylink_config.supported_interfaces);
	} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
		/* No COMPHY, with only 2500BASE-X mode supported */
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  pp->phylink_config.supported_interfaces);
	} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
		   phy_mode == PHY_INTERFACE_MODE_SGMII) {
		/* No COMPHY, we can switch between 1000BASE-X and SGMII */
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  pp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  pp->phylink_config.supported_interfaces);
	}

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_clk;
	}

	pp->phylink = phylink;

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_free_phylink;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	err = of_get_ethdev_address(dn, dev);
	if (!err) {
		mac_from = "device tree";
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			eth_hw_addr_set(dev, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms, whose
		 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	if (!pp->bm_priv)
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT |
				    NETDEV_XDP_ACT_RX_SG |
				    NETDEV_XDP_ACT_NDO_XMIT_SG;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

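/* Note: mvneta_suspend() above tears the queues down completely (pending
 * RX descriptors are dropped, TXQ hardware state is de-initialized), so
 * mvneta_resume() below has to rebuild everything from scratch: clocks,
 * mbus windows, the optional BM pools and the per-queue hardware state,
 * before re-registering the CPU hotplug callbacks and restarting the
 * device.
 */
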
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ .compatible = "marvell,armada-ac5-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

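/* Illustrative device tree node for one of the compatibles above. The
 * address, interrupt number, clock phandle and optional properties are
 * example values based on what mvneta_probe() parses ("core"/"bus"
 * clocks, phy-mode, tx-csum-limit, buffer-manager), not an excerpt from
 * the canonical binding:
 *
 *   ethernet@70000 {
 *           compatible = "marvell,armada-370-neta";
 *           reg = <0x70000 0x4000>;
 *           interrupts = <8>;
 *           clocks = <&gateclk 4>;
 *           phy-mode = "rgmii-id";
 *           phy = <&phy0>;
 *           tx-csum-limit = <1600>;
 *   };
 */
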
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

static int __init mvneta_driver_init(void)
{
	int ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);
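
/* The module parameters above are set at load time, e.g.
 * "modprobe mvneta rxq_def=1". The 0444 ones are then read-only under
 * /sys/module/mvneta/parameters/, while rx_copybreak (0644) can also be
 * changed at runtime. The values shown are examples only.
 */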