1 /* 2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Rami Rosen <rosenr@marvell.com> 7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 8 * 9 * This file is licensed under the terms of the GNU General Public 10 * License version 2. This program is licensed "as is" without any 11 * warranty of any kind, whether express or implied. 12 */ 13 14 #include <linux/clk.h> 15 #include <linux/cpu.h> 16 #include <linux/etherdevice.h> 17 #include <linux/if_vlan.h> 18 #include <linux/inetdevice.h> 19 #include <linux/interrupt.h> 20 #include <linux/io.h> 21 #include <linux/kernel.h> 22 #include <linux/mbus.h> 23 #include <linux/module.h> 24 #include <linux/netdevice.h> 25 #include <linux/of.h> 26 #include <linux/of_address.h> 27 #include <linux/of_irq.h> 28 #include <linux/of_mdio.h> 29 #include <linux/of_net.h> 30 #include <linux/phy/phy.h> 31 #include <linux/phy.h> 32 #include <linux/phylink.h> 33 #include <linux/platform_device.h> 34 #include <linux/skbuff.h> 35 #include <net/hwbm.h> 36 #include "mvneta_bm.h" 37 #include <net/ip.h> 38 #include <net/ipv6.h> 39 #include <net/tso.h> 40 #include <net/page_pool.h> 41 #include <net/pkt_cls.h> 42 #include <linux/bpf_trace.h> 43 44 /* Registers */ 45 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 46 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) 47 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 48 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 49 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 50 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 51 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) 52 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) 53 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) 54 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) 55 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) 56 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) 57 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 58 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) 59 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) 60 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff 61 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) 62 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 63 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 64 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) 65 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 66 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 67 #define MVNETA_PORT_RX_RESET 0x1cc0 68 #define MVNETA_PORT_RX_DMA_RESET BIT(0) 69 #define MVNETA_PHY_ADDR 0x2000 70 #define MVNETA_PHY_ADDR_MASK 0x1f 71 #define MVNETA_MBUS_RETRY 0x2010 72 #define MVNETA_UNIT_INTR_CAUSE 0x2080 73 #define MVNETA_UNIT_CONTROL 0x20B0 74 #define MVNETA_PHY_POLLING_ENABLE BIT(1) 75 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) 76 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) 77 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) 78 #define MVNETA_BASE_ADDR_ENABLE 0x2290 79 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 80 #define MVNETA_PORT_CONFIG 0x2400 81 #define MVNETA_UNI_PROMISC_MODE BIT(0) 82 #define MVNETA_DEF_RXQ(q) ((q) << 1) 83 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) 84 #define MVNETA_TX_UNSET_ERR_SUM BIT(12) 85 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) 86 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) 87 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) 88 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) 89 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ 90 MVNETA_DEF_RXQ_ARP(q) | \ 91 MVNETA_DEF_RXQ_TCP(q) | \ 92 MVNETA_DEF_RXQ_UDP(q) | \ 93 MVNETA_DEF_RXQ_BPDU(q) | \ 94 MVNETA_TX_UNSET_ERR_SUM | \ 95 MVNETA_RX_CSUM_WITH_PSEUDO_HDR) 96 #define MVNETA_PORT_CONFIG_EXTEND 0x2404 97 #define MVNETA_MAC_ADDR_LOW 0x2414 98 #define MVNETA_MAC_ADDR_HIGH 0x2418 99 #define MVNETA_SDMA_CONFIG 0x241c 100 #define MVNETA_SDMA_BRST_SIZE_16 4 101 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 102 #define MVNETA_RX_NO_DATA_SWAP BIT(4) 103 #define MVNETA_TX_NO_DATA_SWAP BIT(5) 104 #define MVNETA_DESC_SWAP BIT(6) 105 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 106 #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 107 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) 108 #define MVNETA_PORT_STATUS 0x2444 109 #define MVNETA_TX_IN_PRGRS BIT(0) 110 #define MVNETA_TX_FIFO_EMPTY BIT(8) 111 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c 112 /* Only exists on Armada XP and Armada 370 */ 113 #define MVNETA_SERDES_CFG 0x24A0 114 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 115 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 116 #define MVNETA_HSGMII_SERDES_PROTO 0x1107 117 #define MVNETA_TYPE_PRIO 0x24bc 118 #define MVNETA_FORCE_UNI BIT(21) 119 #define MVNETA_TXQ_CMD_1 0x24e4 120 #define MVNETA_TXQ_CMD 0x2448 121 #define MVNETA_TXQ_DISABLE_SHIFT 8 122 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff 123 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 124 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488 125 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 126 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) 127 #define MVNETA_ACC_MODE 0x2500 128 #define MVNETA_BM_ADDRESS 0x2504 129 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 130 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff 131 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 132 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) 133 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) 134 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) 135 136 /* Exception Interrupt Port/Queue Cause register 137 * 138 * Their behavior depend of the mapping done using the PCPX2Q 139 * registers. For a given CPU if the bit associated to a queue is not 140 * set, then for the register a read from this CPU will always return 141 * 0 and a write won't do anything 142 */ 143 144 #define MVNETA_INTR_NEW_CAUSE 0x25a0 145 #define MVNETA_INTR_NEW_MASK 0x25a4 146 147 /* bits 0..7 = TXQ SENT, one bit per queue. 148 * bits 8..15 = RXQ OCCUP, one bit per queue. 149 * bits 16..23 = RXQ FREE, one bit per queue. 150 * bit 29 = OLD_REG_SUM, see old reg ? 151 * bit 30 = TX_ERR_SUM, one bit for 4 ports 152 * bit 31 = MISC_SUM, one bit for 4 ports 153 */ 154 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) 155 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) 156 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 157 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) 158 #define MVNETA_MISCINTR_INTR_MASK BIT(31) 159 160 #define MVNETA_INTR_OLD_CAUSE 0x25a8 161 #define MVNETA_INTR_OLD_MASK 0x25ac 162 163 /* Data Path Port/Queue Cause Register */ 164 #define MVNETA_INTR_MISC_CAUSE 0x25b0 165 #define MVNETA_INTR_MISC_MASK 0x25b4 166 167 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) 168 #define MVNETA_CAUSE_LINK_CHANGE BIT(1) 169 #define MVNETA_CAUSE_PTP BIT(4) 170 171 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) 172 #define MVNETA_CAUSE_RX_OVERRUN BIT(8) 173 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) 174 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) 175 #define MVNETA_CAUSE_TX_UNDERUN BIT(11) 176 #define MVNETA_CAUSE_PRBS_ERR BIT(12) 177 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) 178 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) 179 180 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 181 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) 182 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) 183 184 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 185 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) 186 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) 187 188 #define MVNETA_INTR_ENABLE 0x25b8 189 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 190 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff 191 192 #define MVNETA_RXQ_CMD 0x2680 193 #define MVNETA_RXQ_DISABLE_SHIFT 8 194 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff 195 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) 196 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) 197 #define MVNETA_GMAC_CTRL_0 0x2c00 198 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 199 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 200 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) 201 #define MVNETA_GMAC0_PORT_ENABLE BIT(0) 202 #define MVNETA_GMAC_CTRL_2 0x2c08 203 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) 204 #define MVNETA_GMAC2_PCS_ENABLE BIT(3) 205 #define MVNETA_GMAC2_PORT_RGMII BIT(4) 206 #define MVNETA_GMAC2_PORT_RESET BIT(6) 207 #define MVNETA_GMAC_STATUS 0x2c10 208 #define MVNETA_GMAC_LINK_UP BIT(0) 209 #define MVNETA_GMAC_SPEED_1000 BIT(1) 210 #define MVNETA_GMAC_SPEED_100 BIT(2) 211 #define MVNETA_GMAC_FULL_DUPLEX BIT(3) 212 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) 213 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) 214 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) 215 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) 216 #define MVNETA_GMAC_AN_COMPLETE BIT(11) 217 #define MVNETA_GMAC_SYNC_OK BIT(14) 218 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c 219 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) 220 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 221 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) 222 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) 223 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) 224 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 225 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 226 #define MVNETA_GMAC_AN_SPEED_EN BIT(7) 227 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) 228 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) 229 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) 230 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 231 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) 232 #define MVNETA_GMAC_CTRL_4 0x2c90 233 #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) 234 #define MVNETA_MIB_COUNTERS_BASE 0x3000 235 #define MVNETA_MIB_LATE_COLLISION 0x7c 236 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 237 #define MVNETA_DA_FILT_OTH_MCAST 0x3500 238 #define MVNETA_DA_FILT_UCAST_BASE 0x3600 239 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) 240 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) 241 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 242 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) 243 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) 244 #define MVNETA_TXQ_DEC_SENT_SHIFT 16 245 #define MVNETA_TXQ_DEC_SENT_MASK 0xff 246 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) 247 #define MVNETA_TXQ_SENT_DESC_SHIFT 16 248 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 249 #define MVNETA_PORT_TX_RESET 0x3cf0 250 #define MVNETA_PORT_TX_DMA_RESET BIT(0) 251 #define MVNETA_TXQ_CMD1_REG 0x3e00 252 #define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3) 253 #define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0) 254 #define MVNETA_REFILL_NUM_CLK_REG 0x3e08 255 #define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff 256 #define MVNETA_TX_MTU 0x3e0c 257 #define MVNETA_TX_TOKEN_SIZE 0x3e14 258 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff 259 #define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2)) 260 #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000 261 #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20 262 #define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff 263 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) 264 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff 265 266 /* The values of the bucket refill base period and refill period are taken from 267 * the reference manual, and adds up to a base resolution of 10Kbps. This allows 268 * to cover all rate-limit values from 10Kbps up to 5Gbps 269 */ 270 271 /* Base period for the rate limit algorithm */ 272 #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100 273 274 /* Number of Base Period to wait between each bucket refill */ 275 #define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000 276 277 /* The base resolution for rate limiting, in bps. Any max_rate value should be 278 * a multiple of that value. 279 */ 280 #define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \ 281 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \ 282 MVNETA_TXQ_BUCKET_REFILL_PERIOD)) 283 284 #define MVNETA_LPI_CTRL_0 0x2cc0 285 #define MVNETA_LPI_CTRL_1 0x2cc4 286 #define MVNETA_LPI_REQUEST_ENABLE BIT(0) 287 #define MVNETA_LPI_CTRL_2 0x2cc8 288 #define MVNETA_LPI_STATUS 0x2ccc 289 290 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 291 292 /* Descriptor ring Macros */ 293 #define MVNETA_QUEUE_NEXT_DESC(q, index) \ 294 (((index) < (q)->last_desc) ? ((index) + 1) : 0) 295 296 /* Various constants */ 297 298 /* Coalescing */ 299 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */ 300 #define MVNETA_RX_COAL_PKTS 32 301 #define MVNETA_RX_COAL_USEC 100 302 303 /* The two bytes Marvell header. Either contains a special value used 304 * by Marvell switches when a specific hardware mode is enabled (not 305 * supported by this driver) or is filled automatically by zeroes on 306 * the RX side. Those two bytes being at the front of the Ethernet 307 * header, they allow to have the IP header aligned on a 4 bytes 308 * boundary automatically: the hardware skips those two bytes on its 309 * own. 310 */ 311 #define MVNETA_MH_SIZE 2 312 313 #define MVNETA_VLAN_TAG_LEN 4 314 315 #define MVNETA_TX_CSUM_DEF_SIZE 1600 316 #define MVNETA_TX_CSUM_MAX_SIZE 9800 317 #define MVNETA_ACC_MODE_EXT1 1 318 #define MVNETA_ACC_MODE_EXT2 2 319 320 #define MVNETA_MAX_DECODE_WIN 6 321 322 /* Timeout constants */ 323 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 324 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 325 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 326 327 #define MVNETA_TX_MTU_MAX 0x3ffff 328 329 /* The RSS lookup table actually has 256 entries but we do not use 330 * them yet 331 */ 332 #define MVNETA_RSS_LU_TABLE_SIZE 1 333 334 /* Max number of Rx descriptors */ 335 #define MVNETA_MAX_RXD 512 336 337 /* Max number of Tx descriptors */ 338 #define MVNETA_MAX_TXD 1024 339 340 /* Max number of allowed TCP segments for software TSO */ 341 #define MVNETA_MAX_TSO_SEGS 100 342 343 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 344 345 /* descriptor aligned size */ 346 #define MVNETA_DESC_ALIGNED_SIZE 32 347 348 /* Number of bytes to be taken into account by HW when putting incoming data 349 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet 350 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers. 351 */ 352 #define MVNETA_RX_PKT_OFFSET_CORRECTION 64 353 354 #define MVNETA_RX_PKT_SIZE(mtu) \ 355 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ 356 ETH_HLEN + ETH_FCS_LEN, \ 357 cache_line_size()) 358 359 /* Driver assumes that the last 3 bits are 0 */ 360 #define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) 361 #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ 362 MVNETA_SKB_HEADROOM)) 363 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) 364 365 #define IS_TSO_HEADER(txq, addr) \ 366 ((addr >= txq->tso_hdrs_phys) && \ 367 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) 368 369 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \ 370 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) 371 372 enum { 373 ETHTOOL_STAT_EEE_WAKEUP, 374 ETHTOOL_STAT_SKB_ALLOC_ERR, 375 ETHTOOL_STAT_REFILL_ERR, 376 ETHTOOL_XDP_REDIRECT, 377 ETHTOOL_XDP_PASS, 378 ETHTOOL_XDP_DROP, 379 ETHTOOL_XDP_TX, 380 ETHTOOL_XDP_TX_ERR, 381 ETHTOOL_XDP_XMIT, 382 ETHTOOL_XDP_XMIT_ERR, 383 ETHTOOL_MAX_STATS, 384 }; 385 386 struct mvneta_statistic { 387 unsigned short offset; 388 unsigned short type; 389 const char name[ETH_GSTRING_LEN]; 390 }; 391 392 #define T_REG_32 32 393 #define T_REG_64 64 394 #define T_SW 1 395 396 #define MVNETA_XDP_PASS 0 397 #define MVNETA_XDP_DROPPED BIT(0) 398 #define MVNETA_XDP_TX BIT(1) 399 #define MVNETA_XDP_REDIR BIT(2) 400 401 static const struct mvneta_statistic mvneta_statistics[] = { 402 { 0x3000, T_REG_64, "good_octets_received", }, 403 { 0x3010, T_REG_32, "good_frames_received", }, 404 { 0x3008, T_REG_32, "bad_octets_received", }, 405 { 0x3014, T_REG_32, "bad_frames_received", }, 406 { 0x3018, T_REG_32, "broadcast_frames_received", }, 407 { 0x301c, T_REG_32, "multicast_frames_received", }, 408 { 0x3050, T_REG_32, "unrec_mac_control_received", }, 409 { 0x3058, T_REG_32, "good_fc_received", }, 410 { 0x305c, T_REG_32, "bad_fc_received", }, 411 { 0x3060, T_REG_32, "undersize_received", }, 412 { 0x3064, T_REG_32, "fragments_received", }, 413 { 0x3068, T_REG_32, "oversize_received", }, 414 { 0x306c, T_REG_32, "jabber_received", }, 415 { 0x3070, T_REG_32, "mac_receive_error", }, 416 { 0x3074, T_REG_32, "bad_crc_event", }, 417 { 0x3078, T_REG_32, "collision", }, 418 { 0x307c, T_REG_32, "late_collision", }, 419 { 0x2484, T_REG_32, "rx_discard", }, 420 { 0x2488, T_REG_32, "rx_overrun", }, 421 { 0x3020, T_REG_32, "frames_64_octets", }, 422 { 0x3024, T_REG_32, "frames_65_to_127_octets", }, 423 { 0x3028, T_REG_32, "frames_128_to_255_octets", }, 424 { 0x302c, T_REG_32, "frames_256_to_511_octets", }, 425 { 0x3030, T_REG_32, "frames_512_to_1023_octets", }, 426 { 0x3034, T_REG_32, "frames_1024_to_max_octets", }, 427 { 0x3038, T_REG_64, "good_octets_sent", }, 428 { 0x3040, T_REG_32, "good_frames_sent", }, 429 { 0x3044, T_REG_32, "excessive_collision", }, 430 { 0x3048, T_REG_32, "multicast_frames_sent", }, 431 { 0x304c, T_REG_32, "broadcast_frames_sent", }, 432 { 0x3054, T_REG_32, "fc_sent", }, 433 { 0x300c, T_REG_32, "internal_mac_transmit_err", }, 434 { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, 435 { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, 436 { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, 437 { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", }, 438 { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", }, 439 { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", }, 440 { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", }, 441 { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", }, 442 { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", }, 443 { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", }, 444 }; 445 446 struct mvneta_stats { 447 u64 rx_packets; 448 u64 rx_bytes; 449 u64 tx_packets; 450 u64 tx_bytes; 451 /* xdp */ 452 u64 xdp_redirect; 453 u64 xdp_pass; 454 u64 xdp_drop; 455 u64 xdp_xmit; 456 u64 xdp_xmit_err; 457 u64 xdp_tx; 458 u64 xdp_tx_err; 459 }; 460 461 struct mvneta_ethtool_stats { 462 struct mvneta_stats ps; 463 u64 skb_alloc_error; 464 u64 refill_error; 465 }; 466 467 struct mvneta_pcpu_stats { 468 struct u64_stats_sync syncp; 469 470 struct mvneta_ethtool_stats es; 471 u64 rx_dropped; 472 u64 rx_errors; 473 }; 474 475 struct mvneta_pcpu_port { 476 /* Pointer to the shared port */ 477 struct mvneta_port *pp; 478 479 /* Pointer to the CPU-local NAPI struct */ 480 struct napi_struct napi; 481 482 /* Cause of the previous interrupt */ 483 u32 cause_rx_tx; 484 }; 485 486 enum { 487 __MVNETA_DOWN, 488 }; 489 490 struct mvneta_port { 491 u8 id; 492 struct mvneta_pcpu_port __percpu *ports; 493 struct mvneta_pcpu_stats __percpu *stats; 494 495 unsigned long state; 496 497 int pkt_size; 498 void __iomem *base; 499 struct mvneta_rx_queue *rxqs; 500 struct mvneta_tx_queue *txqs; 501 struct net_device *dev; 502 struct hlist_node node_online; 503 struct hlist_node node_dead; 504 int rxq_def; 505 /* Protect the access to the percpu interrupt registers, 506 * ensuring that the configuration remains coherent. 507 */ 508 spinlock_t lock; 509 bool is_stopped; 510 511 u32 cause_rx_tx; 512 struct napi_struct napi; 513 514 struct bpf_prog *xdp_prog; 515 516 /* Core clock */ 517 struct clk *clk; 518 /* AXI clock */ 519 struct clk *clk_bus; 520 u8 mcast_count[256]; 521 u16 tx_ring_size; 522 u16 rx_ring_size; 523 524 phy_interface_t phy_interface; 525 struct device_node *dn; 526 unsigned int tx_csum_limit; 527 struct phylink *phylink; 528 struct phylink_config phylink_config; 529 struct phylink_pcs phylink_pcs; 530 struct phy *comphy; 531 532 struct mvneta_bm *bm_priv; 533 struct mvneta_bm_pool *pool_long; 534 struct mvneta_bm_pool *pool_short; 535 int bm_win_id; 536 537 bool eee_enabled; 538 bool eee_active; 539 bool tx_lpi_enabled; 540 541 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; 542 543 u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; 544 545 /* Flags for special SoC configurations */ 546 bool neta_armada3700; 547 u16 rx_offset_correction; 548 const struct mbus_dram_target_info *dram_target_info; 549 }; 550 551 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the 552 * layout of the transmit and reception DMA descriptors, and their 553 * layout is therefore defined by the hardware design 554 */ 555 556 #define MVNETA_TX_L3_OFF_SHIFT 0 557 #define MVNETA_TX_IP_HLEN_SHIFT 8 558 #define MVNETA_TX_L4_UDP BIT(16) 559 #define MVNETA_TX_L3_IP6 BIT(17) 560 #define MVNETA_TXD_IP_CSUM BIT(18) 561 #define MVNETA_TXD_Z_PAD BIT(19) 562 #define MVNETA_TXD_L_DESC BIT(20) 563 #define MVNETA_TXD_F_DESC BIT(21) 564 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ 565 MVNETA_TXD_L_DESC | \ 566 MVNETA_TXD_F_DESC) 567 #define MVNETA_TX_L4_CSUM_FULL BIT(30) 568 #define MVNETA_TX_L4_CSUM_NOT BIT(31) 569 570 #define MVNETA_RXD_ERR_CRC 0x0 571 #define MVNETA_RXD_BM_POOL_SHIFT 13 572 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) 573 #define MVNETA_RXD_ERR_SUMMARY BIT(16) 574 #define MVNETA_RXD_ERR_OVERRUN BIT(17) 575 #define MVNETA_RXD_ERR_LEN BIT(18) 576 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) 577 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) 578 #define MVNETA_RXD_L3_IP4 BIT(25) 579 #define MVNETA_RXD_LAST_DESC BIT(26) 580 #define MVNETA_RXD_FIRST_DESC BIT(27) 581 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ 582 MVNETA_RXD_LAST_DESC) 583 #define MVNETA_RXD_L4_CSUM_OK BIT(30) 584 585 #if defined(__LITTLE_ENDIAN) 586 struct mvneta_tx_desc { 587 u32 command; /* Options used by HW for packet transmitting.*/ 588 u16 reserved1; /* csum_l4 (for future use) */ 589 u16 data_size; /* Data size of transmitted packet in bytes */ 590 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 591 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 592 u32 reserved3[4]; /* Reserved - (for future use) */ 593 }; 594 595 struct mvneta_rx_desc { 596 u32 status; /* Info about received packet */ 597 u16 reserved1; /* pnc_info - (for future use, PnC) */ 598 u16 data_size; /* Size of received packet in bytes */ 599 600 u32 buf_phys_addr; /* Physical address of the buffer */ 601 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 602 603 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 604 u16 reserved3; /* prefetch_cmd, for future use */ 605 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 606 607 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 608 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 609 }; 610 #else 611 struct mvneta_tx_desc { 612 u16 data_size; /* Data size of transmitted packet in bytes */ 613 u16 reserved1; /* csum_l4 (for future use) */ 614 u32 command; /* Options used by HW for packet transmitting.*/ 615 u32 reserved2; /* hw_cmd - (for future use, PMT) */ 616 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 617 u32 reserved3[4]; /* Reserved - (for future use) */ 618 }; 619 620 struct mvneta_rx_desc { 621 u16 data_size; /* Size of received packet in bytes */ 622 u16 reserved1; /* pnc_info - (for future use, PnC) */ 623 u32 status; /* Info about received packet */ 624 625 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 626 u32 buf_phys_addr; /* Physical address of the buffer */ 627 628 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 629 u16 reserved3; /* prefetch_cmd, for future use */ 630 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 631 632 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 633 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 634 }; 635 #endif 636 637 enum mvneta_tx_buf_type { 638 MVNETA_TYPE_SKB, 639 MVNETA_TYPE_XDP_TX, 640 MVNETA_TYPE_XDP_NDO, 641 }; 642 643 struct mvneta_tx_buf { 644 enum mvneta_tx_buf_type type; 645 union { 646 struct xdp_frame *xdpf; 647 struct sk_buff *skb; 648 }; 649 }; 650 651 struct mvneta_tx_queue { 652 /* Number of this TX queue, in the range 0-7 */ 653 u8 id; 654 655 /* Number of TX DMA descriptors in the descriptor ring */ 656 int size; 657 658 /* Number of currently used TX DMA descriptor in the 659 * descriptor ring 660 */ 661 int count; 662 int pending; 663 int tx_stop_threshold; 664 int tx_wake_threshold; 665 666 /* Array of transmitted buffers */ 667 struct mvneta_tx_buf *buf; 668 669 /* Index of last TX DMA descriptor that was inserted */ 670 int txq_put_index; 671 672 /* Index of the TX DMA descriptor to be cleaned up */ 673 int txq_get_index; 674 675 u32 done_pkts_coal; 676 677 /* Virtual address of the TX DMA descriptors array */ 678 struct mvneta_tx_desc *descs; 679 680 /* DMA address of the TX DMA descriptors array */ 681 dma_addr_t descs_phys; 682 683 /* Index of the last TX DMA descriptor */ 684 int last_desc; 685 686 /* Index of the next TX DMA descriptor to process */ 687 int next_desc_to_proc; 688 689 /* DMA buffers for TSO headers */ 690 char *tso_hdrs; 691 692 /* DMA address of TSO headers */ 693 dma_addr_t tso_hdrs_phys; 694 695 /* Affinity mask for CPUs*/ 696 cpumask_t affinity_mask; 697 }; 698 699 struct mvneta_rx_queue { 700 /* rx queue number, in the range 0-7 */ 701 u8 id; 702 703 /* num of rx descriptors in the rx descriptor ring */ 704 int size; 705 706 u32 pkts_coal; 707 u32 time_coal; 708 709 /* page_pool */ 710 struct page_pool *page_pool; 711 struct xdp_rxq_info xdp_rxq; 712 713 /* Virtual address of the RX buffer */ 714 void **buf_virt_addr; 715 716 /* Virtual address of the RX DMA descriptors array */ 717 struct mvneta_rx_desc *descs; 718 719 /* DMA address of the RX DMA descriptors array */ 720 dma_addr_t descs_phys; 721 722 /* Index of the last RX DMA descriptor */ 723 int last_desc; 724 725 /* Index of the next RX DMA descriptor to process */ 726 int next_desc_to_proc; 727 728 /* Index of first RX DMA descriptor to refill */ 729 int first_to_refill; 730 u32 refill_num; 731 }; 732 733 static enum cpuhp_state online_hpstate; 734 /* The hardware supports eight (8) rx queues, but we are only allowing 735 * the first one to be used. Therefore, let's just allocate one queue. 736 */ 737 static int rxq_number = 8; 738 static int txq_number = 8; 739 740 static int rxq_def; 741 742 static int rx_copybreak __read_mostly = 256; 743 744 /* HW BM need that each port be identify by a unique ID */ 745 static int global_port_id; 746 747 #define MVNETA_DRIVER_NAME "mvneta" 748 #define MVNETA_DRIVER_VERSION "1.0" 749 750 /* Utility/helper methods */ 751 752 /* Write helper method */ 753 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) 754 { 755 writel(data, pp->base + offset); 756 } 757 758 /* Read helper method */ 759 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) 760 { 761 return readl(pp->base + offset); 762 } 763 764 /* Increment txq get counter */ 765 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) 766 { 767 txq->txq_get_index++; 768 if (txq->txq_get_index == txq->size) 769 txq->txq_get_index = 0; 770 } 771 772 /* Increment txq put counter */ 773 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) 774 { 775 txq->txq_put_index++; 776 if (txq->txq_put_index == txq->size) 777 txq->txq_put_index = 0; 778 } 779 780 781 /* Clear all MIB counters */ 782 static void mvneta_mib_counters_clear(struct mvneta_port *pp) 783 { 784 int i; 785 786 /* Perform dummy reads from MIB counters */ 787 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) 788 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); 789 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); 790 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); 791 } 792 793 /* Get System Network Statistics */ 794 static void 795 mvneta_get_stats64(struct net_device *dev, 796 struct rtnl_link_stats64 *stats) 797 { 798 struct mvneta_port *pp = netdev_priv(dev); 799 unsigned int start; 800 int cpu; 801 802 for_each_possible_cpu(cpu) { 803 struct mvneta_pcpu_stats *cpu_stats; 804 u64 rx_packets; 805 u64 rx_bytes; 806 u64 rx_dropped; 807 u64 rx_errors; 808 u64 tx_packets; 809 u64 tx_bytes; 810 811 cpu_stats = per_cpu_ptr(pp->stats, cpu); 812 do { 813 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 814 rx_packets = cpu_stats->es.ps.rx_packets; 815 rx_bytes = cpu_stats->es.ps.rx_bytes; 816 rx_dropped = cpu_stats->rx_dropped; 817 rx_errors = cpu_stats->rx_errors; 818 tx_packets = cpu_stats->es.ps.tx_packets; 819 tx_bytes = cpu_stats->es.ps.tx_bytes; 820 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 821 822 stats->rx_packets += rx_packets; 823 stats->rx_bytes += rx_bytes; 824 stats->rx_dropped += rx_dropped; 825 stats->rx_errors += rx_errors; 826 stats->tx_packets += tx_packets; 827 stats->tx_bytes += tx_bytes; 828 } 829 830 stats->tx_dropped = dev->stats.tx_dropped; 831 } 832 833 /* Rx descriptors helper methods */ 834 835 /* Checks whether the RX descriptor having this status is both the first 836 * and the last descriptor for the RX packet. Each RX packet is currently 837 * received through a single RX descriptor, so not having each RX 838 * descriptor with its first and last bits set is an error 839 */ 840 static int mvneta_rxq_desc_is_first_last(u32 status) 841 { 842 return (status & MVNETA_RXD_FIRST_LAST_DESC) == 843 MVNETA_RXD_FIRST_LAST_DESC; 844 } 845 846 /* Add number of descriptors ready to receive new packets */ 847 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, 848 struct mvneta_rx_queue *rxq, 849 int ndescs) 850 { 851 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can 852 * be added at once 853 */ 854 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { 855 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 856 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << 857 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 858 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; 859 } 860 861 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 862 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 863 } 864 865 /* Get number of RX descriptors occupied by received packets */ 866 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, 867 struct mvneta_rx_queue *rxq) 868 { 869 u32 val; 870 871 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); 872 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; 873 } 874 875 /* Update num of rx desc called upon return from rx path or 876 * from mvneta_rxq_drop_pkts(). 877 */ 878 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, 879 struct mvneta_rx_queue *rxq, 880 int rx_done, int rx_filled) 881 { 882 u32 val; 883 884 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { 885 val = rx_done | 886 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); 887 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); 888 return; 889 } 890 891 /* Only 255 descriptors can be added at once */ 892 while ((rx_done > 0) || (rx_filled > 0)) { 893 if (rx_done <= 0xff) { 894 val = rx_done; 895 rx_done = 0; 896 } else { 897 val = 0xff; 898 rx_done -= 0xff; 899 } 900 if (rx_filled <= 0xff) { 901 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; 902 rx_filled = 0; 903 } else { 904 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; 905 rx_filled -= 0xff; 906 } 907 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); 908 } 909 } 910 911 /* Get pointer to next RX descriptor to be processed by SW */ 912 static struct mvneta_rx_desc * 913 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) 914 { 915 int rx_desc = rxq->next_desc_to_proc; 916 917 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); 918 prefetch(rxq->descs + rxq->next_desc_to_proc); 919 return rxq->descs + rx_desc; 920 } 921 922 /* Change maximum receive size of the port. */ 923 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) 924 { 925 u32 val; 926 927 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 928 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK; 929 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << 930 MVNETA_GMAC_MAX_RX_SIZE_SHIFT; 931 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 932 } 933 934 935 /* Set rx queue offset */ 936 static void mvneta_rxq_offset_set(struct mvneta_port *pp, 937 struct mvneta_rx_queue *rxq, 938 int offset) 939 { 940 u32 val; 941 942 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 943 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK; 944 945 /* Offset is in */ 946 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3); 947 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 948 } 949 950 951 /* Tx descriptors helper methods */ 952 953 /* Update HW with number of TX descriptors to be sent */ 954 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, 955 struct mvneta_tx_queue *txq, 956 int pend_desc) 957 { 958 u32 val; 959 960 pend_desc += txq->pending; 961 962 /* Only 255 Tx descriptors can be added at once */ 963 do { 964 val = min(pend_desc, 255); 965 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 966 pend_desc -= val; 967 } while (pend_desc > 0); 968 txq->pending = 0; 969 } 970 971 /* Get pointer to next TX descriptor to be processed (send) by HW */ 972 static struct mvneta_tx_desc * 973 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) 974 { 975 int tx_desc = txq->next_desc_to_proc; 976 977 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); 978 return txq->descs + tx_desc; 979 } 980 981 /* Release the last allocated TX descriptor. Useful to handle DMA 982 * mapping failures in the TX path. 983 */ 984 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) 985 { 986 if (txq->next_desc_to_proc == 0) 987 txq->next_desc_to_proc = txq->last_desc - 1; 988 else 989 txq->next_desc_to_proc--; 990 } 991 992 /* Set rxq buf size */ 993 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, 994 struct mvneta_rx_queue *rxq, 995 int buf_size) 996 { 997 u32 val; 998 999 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); 1000 1001 val &= ~MVNETA_RXQ_BUF_SIZE_MASK; 1002 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); 1003 1004 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); 1005 } 1006 1007 /* Disable buffer management (BM) */ 1008 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, 1009 struct mvneta_rx_queue *rxq) 1010 { 1011 u32 val; 1012 1013 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1014 val &= ~MVNETA_RXQ_HW_BUF_ALLOC; 1015 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1016 } 1017 1018 /* Enable buffer management (BM) */ 1019 static void mvneta_rxq_bm_enable(struct mvneta_port *pp, 1020 struct mvneta_rx_queue *rxq) 1021 { 1022 u32 val; 1023 1024 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1025 val |= MVNETA_RXQ_HW_BUF_ALLOC; 1026 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1027 } 1028 1029 /* Notify HW about port's assignment of pool for bigger packets */ 1030 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, 1031 struct mvneta_rx_queue *rxq) 1032 { 1033 u32 val; 1034 1035 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1036 val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; 1037 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); 1038 1039 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1040 } 1041 1042 /* Notify HW about port's assignment of pool for smaller packets */ 1043 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, 1044 struct mvneta_rx_queue *rxq) 1045 { 1046 u32 val; 1047 1048 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); 1049 val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; 1050 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); 1051 1052 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 1053 } 1054 1055 /* Set port's receive buffer size for assigned BM pool */ 1056 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, 1057 int buf_size, 1058 u8 pool_id) 1059 { 1060 u32 val; 1061 1062 if (!IS_ALIGNED(buf_size, 8)) { 1063 dev_warn(pp->dev->dev.parent, 1064 "illegal buf_size value %d, round to %d\n", 1065 buf_size, ALIGN(buf_size, 8)); 1066 buf_size = ALIGN(buf_size, 8); 1067 } 1068 1069 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); 1070 val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; 1071 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); 1072 } 1073 1074 /* Configure MBUS window in order to enable access BM internal SRAM */ 1075 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, 1076 u8 target, u8 attr) 1077 { 1078 u32 win_enable, win_protect; 1079 int i; 1080 1081 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); 1082 1083 if (pp->bm_win_id < 0) { 1084 /* Find first not occupied window */ 1085 for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { 1086 if (win_enable & (1 << i)) { 1087 pp->bm_win_id = i; 1088 break; 1089 } 1090 } 1091 if (i == MVNETA_MAX_DECODE_WIN) 1092 return -ENOMEM; 1093 } else { 1094 i = pp->bm_win_id; 1095 } 1096 1097 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 1098 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 1099 1100 if (i < 4) 1101 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 1102 1103 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | 1104 (attr << 8) | target); 1105 1106 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); 1107 1108 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); 1109 win_protect |= 3 << (2 * i); 1110 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 1111 1112 win_enable &= ~(1 << i); 1113 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 1114 1115 return 0; 1116 } 1117 1118 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) 1119 { 1120 u32 wsize; 1121 u8 target, attr; 1122 int err; 1123 1124 /* Get BM window information */ 1125 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, 1126 &target, &attr); 1127 if (err < 0) 1128 return err; 1129 1130 pp->bm_win_id = -1; 1131 1132 /* Open NETA -> BM window */ 1133 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, 1134 target, attr); 1135 if (err < 0) { 1136 netdev_info(pp->dev, "fail to configure mbus window to BM\n"); 1137 return err; 1138 } 1139 return 0; 1140 } 1141 1142 /* Assign and initialize pools for port. In case of fail 1143 * buffer manager will remain disabled for current port. 1144 */ 1145 static int mvneta_bm_port_init(struct platform_device *pdev, 1146 struct mvneta_port *pp) 1147 { 1148 struct device_node *dn = pdev->dev.of_node; 1149 u32 long_pool_id, short_pool_id; 1150 1151 if (!pp->neta_armada3700) { 1152 int ret; 1153 1154 ret = mvneta_bm_port_mbus_init(pp); 1155 if (ret) 1156 return ret; 1157 } 1158 1159 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { 1160 netdev_info(pp->dev, "missing long pool id\n"); 1161 return -EINVAL; 1162 } 1163 1164 /* Create port's long pool depending on mtu */ 1165 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, 1166 MVNETA_BM_LONG, pp->id, 1167 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); 1168 if (!pp->pool_long) { 1169 netdev_info(pp->dev, "fail to obtain long pool for port\n"); 1170 return -ENOMEM; 1171 } 1172 1173 pp->pool_long->port_map |= 1 << pp->id; 1174 1175 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, 1176 pp->pool_long->id); 1177 1178 /* If short pool id is not defined, assume using single pool */ 1179 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) 1180 short_pool_id = long_pool_id; 1181 1182 /* Create port's short pool */ 1183 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, 1184 MVNETA_BM_SHORT, pp->id, 1185 MVNETA_BM_SHORT_PKT_SIZE); 1186 if (!pp->pool_short) { 1187 netdev_info(pp->dev, "fail to obtain short pool for port\n"); 1188 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1189 return -ENOMEM; 1190 } 1191 1192 if (short_pool_id != long_pool_id) { 1193 pp->pool_short->port_map |= 1 << pp->id; 1194 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, 1195 pp->pool_short->id); 1196 } 1197 1198 return 0; 1199 } 1200 1201 /* Update settings of a pool for bigger packets */ 1202 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) 1203 { 1204 struct mvneta_bm_pool *bm_pool = pp->pool_long; 1205 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; 1206 int num; 1207 1208 /* Release all buffers from long pool */ 1209 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); 1210 if (hwbm_pool->buf_num) { 1211 WARN(1, "cannot free all buffers in pool %d\n", 1212 bm_pool->id); 1213 goto bm_mtu_err; 1214 } 1215 1216 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); 1217 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); 1218 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1219 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); 1220 1221 /* Fill entire long pool */ 1222 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); 1223 if (num != hwbm_pool->size) { 1224 WARN(1, "pool %d: %d of %d allocated\n", 1225 bm_pool->id, num, hwbm_pool->size); 1226 goto bm_mtu_err; 1227 } 1228 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); 1229 1230 return; 1231 1232 bm_mtu_err: 1233 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1234 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); 1235 1236 pp->bm_priv = NULL; 1237 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 1238 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); 1239 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); 1240 } 1241 1242 /* Start the Ethernet port RX and TX activity */ 1243 static void mvneta_port_up(struct mvneta_port *pp) 1244 { 1245 int queue; 1246 u32 q_map; 1247 1248 /* Enable all initialized TXs. */ 1249 q_map = 0; 1250 for (queue = 0; queue < txq_number; queue++) { 1251 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 1252 if (txq->descs) 1253 q_map |= (1 << queue); 1254 } 1255 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); 1256 1257 q_map = 0; 1258 /* Enable all initialized RXQs. */ 1259 for (queue = 0; queue < rxq_number; queue++) { 1260 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 1261 1262 if (rxq->descs) 1263 q_map |= (1 << queue); 1264 } 1265 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); 1266 } 1267 1268 /* Stop the Ethernet port activity */ 1269 static void mvneta_port_down(struct mvneta_port *pp) 1270 { 1271 u32 val; 1272 int count; 1273 1274 /* Stop Rx port activity. Check port Rx activity. */ 1275 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; 1276 1277 /* Issue stop command for active channels only */ 1278 if (val != 0) 1279 mvreg_write(pp, MVNETA_RXQ_CMD, 1280 val << MVNETA_RXQ_DISABLE_SHIFT); 1281 1282 /* Wait for all Rx activity to terminate. */ 1283 count = 0; 1284 do { 1285 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { 1286 netdev_warn(pp->dev, 1287 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", 1288 val); 1289 break; 1290 } 1291 mdelay(1); 1292 1293 val = mvreg_read(pp, MVNETA_RXQ_CMD); 1294 } while (val & MVNETA_RXQ_ENABLE_MASK); 1295 1296 /* Stop Tx port activity. Check port Tx activity. Issue stop 1297 * command for active channels only 1298 */ 1299 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; 1300 1301 if (val != 0) 1302 mvreg_write(pp, MVNETA_TXQ_CMD, 1303 (val << MVNETA_TXQ_DISABLE_SHIFT)); 1304 1305 /* Wait for all Tx activity to terminate. */ 1306 count = 0; 1307 do { 1308 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { 1309 netdev_warn(pp->dev, 1310 "TIMEOUT for TX stopped status=0x%08x\n", 1311 val); 1312 break; 1313 } 1314 mdelay(1); 1315 1316 /* Check TX Command reg that all Txqs are stopped */ 1317 val = mvreg_read(pp, MVNETA_TXQ_CMD); 1318 1319 } while (val & MVNETA_TXQ_ENABLE_MASK); 1320 1321 /* Double check to verify that TX FIFO is empty */ 1322 count = 0; 1323 do { 1324 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { 1325 netdev_warn(pp->dev, 1326 "TX FIFO empty timeout status=0x%08x\n", 1327 val); 1328 break; 1329 } 1330 mdelay(1); 1331 1332 val = mvreg_read(pp, MVNETA_PORT_STATUS); 1333 } while (!(val & MVNETA_TX_FIFO_EMPTY) && 1334 (val & MVNETA_TX_IN_PRGRS)); 1335 1336 udelay(200); 1337 } 1338 1339 /* Enable the port by setting the port enable bit of the MAC control register */ 1340 static void mvneta_port_enable(struct mvneta_port *pp) 1341 { 1342 u32 val; 1343 1344 /* Enable port */ 1345 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1346 val |= MVNETA_GMAC0_PORT_ENABLE; 1347 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1348 } 1349 1350 /* Disable the port and wait for about 200 usec before retuning */ 1351 static void mvneta_port_disable(struct mvneta_port *pp) 1352 { 1353 u32 val; 1354 1355 /* Reset the Enable bit in the Serial Control Register */ 1356 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 1357 val &= ~MVNETA_GMAC0_PORT_ENABLE; 1358 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); 1359 1360 udelay(200); 1361 } 1362 1363 /* Multicast tables methods */ 1364 1365 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ 1366 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) 1367 { 1368 int offset; 1369 u32 val; 1370 1371 if (queue == -1) { 1372 val = 0; 1373 } else { 1374 val = 0x1 | (queue << 1); 1375 val |= (val << 24) | (val << 16) | (val << 8); 1376 } 1377 1378 for (offset = 0; offset <= 0xc; offset += 4) 1379 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); 1380 } 1381 1382 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ 1383 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) 1384 { 1385 int offset; 1386 u32 val; 1387 1388 if (queue == -1) { 1389 val = 0; 1390 } else { 1391 val = 0x1 | (queue << 1); 1392 val |= (val << 24) | (val << 16) | (val << 8); 1393 } 1394 1395 for (offset = 0; offset <= 0xfc; offset += 4) 1396 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); 1397 1398 } 1399 1400 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ 1401 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) 1402 { 1403 int offset; 1404 u32 val; 1405 1406 if (queue == -1) { 1407 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); 1408 val = 0; 1409 } else { 1410 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); 1411 val = 0x1 | (queue << 1); 1412 val |= (val << 24) | (val << 16) | (val << 8); 1413 } 1414 1415 for (offset = 0; offset <= 0xfc; offset += 4) 1416 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); 1417 } 1418 1419 static void mvneta_percpu_unmask_interrupt(void *arg) 1420 { 1421 struct mvneta_port *pp = arg; 1422 1423 /* All the queue are unmasked, but actually only the ones 1424 * mapped to this CPU will be unmasked 1425 */ 1426 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 1427 MVNETA_RX_INTR_MASK_ALL | 1428 MVNETA_TX_INTR_MASK_ALL | 1429 MVNETA_MISCINTR_INTR_MASK); 1430 } 1431 1432 static void mvneta_percpu_mask_interrupt(void *arg) 1433 { 1434 struct mvneta_port *pp = arg; 1435 1436 /* All the queue are masked, but actually only the ones 1437 * mapped to this CPU will be masked 1438 */ 1439 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1440 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 1441 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 1442 } 1443 1444 static void mvneta_percpu_clear_intr_cause(void *arg) 1445 { 1446 struct mvneta_port *pp = arg; 1447 1448 /* All the queue are cleared, but actually only the ones 1449 * mapped to this CPU will be cleared 1450 */ 1451 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1452 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 1453 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 1454 } 1455 1456 /* This method sets defaults to the NETA port: 1457 * Clears interrupt Cause and Mask registers. 1458 * Clears all MAC tables. 1459 * Sets defaults to all registers. 1460 * Resets RX and TX descriptor rings. 1461 * Resets PHY. 1462 * This method can be called after mvneta_port_down() to return the port 1463 * settings to defaults. 1464 */ 1465 static void mvneta_defaults_set(struct mvneta_port *pp) 1466 { 1467 int cpu; 1468 int queue; 1469 u32 val; 1470 int max_cpu = num_present_cpus(); 1471 1472 /* Clear all Cause registers */ 1473 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 1474 1475 /* Mask all interrupts */ 1476 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 1477 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1478 1479 /* Enable MBUS Retry bit16 */ 1480 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); 1481 1482 /* Set CPU queue access map. CPUs are assigned to the RX and 1483 * TX queues modulo their number. If there is only one TX 1484 * queue then it is assigned to the CPU associated to the 1485 * default RX queue. 1486 */ 1487 for_each_present_cpu(cpu) { 1488 int rxq_map = 0, txq_map = 0; 1489 int rxq, txq; 1490 if (!pp->neta_armada3700) { 1491 for (rxq = 0; rxq < rxq_number; rxq++) 1492 if ((rxq % max_cpu) == cpu) 1493 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 1494 1495 for (txq = 0; txq < txq_number; txq++) 1496 if ((txq % max_cpu) == cpu) 1497 txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); 1498 1499 /* With only one TX queue we configure a special case 1500 * which will allow to get all the irq on a single 1501 * CPU 1502 */ 1503 if (txq_number == 1) 1504 txq_map = (cpu == pp->rxq_def) ? 1505 MVNETA_CPU_TXQ_ACCESS(1) : 0; 1506 1507 } else { 1508 txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 1509 rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK; 1510 } 1511 1512 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 1513 } 1514 1515 /* Reset RX and TX DMAs */ 1516 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 1517 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 1518 1519 /* Disable Legacy WRR, Disable EJP, Release from reset */ 1520 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); 1521 for (queue = 0; queue < txq_number; queue++) { 1522 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); 1523 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); 1524 } 1525 1526 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 1527 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 1528 1529 /* Set Port Acceleration Mode */ 1530 if (pp->bm_priv) 1531 /* HW buffer management + legacy parser */ 1532 val = MVNETA_ACC_MODE_EXT2; 1533 else 1534 /* SW buffer management + legacy parser */ 1535 val = MVNETA_ACC_MODE_EXT1; 1536 mvreg_write(pp, MVNETA_ACC_MODE, val); 1537 1538 if (pp->bm_priv) 1539 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); 1540 1541 /* Update val of portCfg register accordingly with all RxQueue types */ 1542 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 1543 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 1544 1545 val = 0; 1546 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); 1547 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); 1548 1549 /* Build PORT_SDMA_CONFIG_REG */ 1550 val = 0; 1551 1552 /* Default burst size */ 1553 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 1554 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 1555 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; 1556 1557 #if defined(__BIG_ENDIAN) 1558 val |= MVNETA_DESC_SWAP; 1559 #endif 1560 1561 /* Assign port SDMA configuration */ 1562 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); 1563 1564 /* Disable PHY polling in hardware, since we're using the 1565 * kernel phylib to do this. 1566 */ 1567 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); 1568 val &= ~MVNETA_PHY_POLLING_ENABLE; 1569 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); 1570 1571 mvneta_set_ucast_table(pp, -1); 1572 mvneta_set_special_mcast_table(pp, -1); 1573 mvneta_set_other_mcast_table(pp, -1); 1574 1575 /* Set port interrupt enable register - default enable all */ 1576 mvreg_write(pp, MVNETA_INTR_ENABLE, 1577 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK 1578 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); 1579 1580 mvneta_mib_counters_clear(pp); 1581 } 1582 1583 /* Set max sizes for tx queues */ 1584 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) 1585 1586 { 1587 u32 val, size, mtu; 1588 int queue; 1589 1590 mtu = max_tx_size * 8; 1591 if (mtu > MVNETA_TX_MTU_MAX) 1592 mtu = MVNETA_TX_MTU_MAX; 1593 1594 /* Set MTU */ 1595 val = mvreg_read(pp, MVNETA_TX_MTU); 1596 val &= ~MVNETA_TX_MTU_MAX; 1597 val |= mtu; 1598 mvreg_write(pp, MVNETA_TX_MTU, val); 1599 1600 /* TX token size and all TXQs token size must be larger that MTU */ 1601 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); 1602 1603 size = val & MVNETA_TX_TOKEN_SIZE_MAX; 1604 if (size < mtu) { 1605 size = mtu; 1606 val &= ~MVNETA_TX_TOKEN_SIZE_MAX; 1607 val |= size; 1608 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); 1609 } 1610 for (queue = 0; queue < txq_number; queue++) { 1611 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); 1612 1613 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; 1614 if (size < mtu) { 1615 size = mtu; 1616 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; 1617 val |= size; 1618 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); 1619 } 1620 } 1621 } 1622 1623 /* Set unicast address */ 1624 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, 1625 int queue) 1626 { 1627 unsigned int unicast_reg; 1628 unsigned int tbl_offset; 1629 unsigned int reg_offset; 1630 1631 /* Locate the Unicast table entry */ 1632 last_nibble = (0xf & last_nibble); 1633 1634 /* offset from unicast tbl base */ 1635 tbl_offset = (last_nibble / 4) * 4; 1636 1637 /* offset within the above reg */ 1638 reg_offset = last_nibble % 4; 1639 1640 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); 1641 1642 if (queue == -1) { 1643 /* Clear accepts frame bit at specified unicast DA tbl entry */ 1644 unicast_reg &= ~(0xff << (8 * reg_offset)); 1645 } else { 1646 unicast_reg &= ~(0xff << (8 * reg_offset)); 1647 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1648 } 1649 1650 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); 1651 } 1652 1653 /* Set mac address */ 1654 static void mvneta_mac_addr_set(struct mvneta_port *pp, 1655 const unsigned char *addr, int queue) 1656 { 1657 unsigned int mac_h; 1658 unsigned int mac_l; 1659 1660 if (queue != -1) { 1661 mac_l = (addr[4] << 8) | (addr[5]); 1662 mac_h = (addr[0] << 24) | (addr[1] << 16) | 1663 (addr[2] << 8) | (addr[3] << 0); 1664 1665 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); 1666 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); 1667 } 1668 1669 /* Accept frames of this address */ 1670 mvneta_set_ucast_addr(pp, addr[5], queue); 1671 } 1672 1673 /* Set the number of packets that will be received before RX interrupt 1674 * will be generated by HW. 1675 */ 1676 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, 1677 struct mvneta_rx_queue *rxq, u32 value) 1678 { 1679 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), 1680 value | MVNETA_RXQ_NON_OCCUPIED(0)); 1681 } 1682 1683 /* Set the time delay in usec before RX interrupt will be generated by 1684 * HW. 1685 */ 1686 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, 1687 struct mvneta_rx_queue *rxq, u32 value) 1688 { 1689 u32 val; 1690 unsigned long clk_rate; 1691 1692 clk_rate = clk_get_rate(pp->clk); 1693 val = (clk_rate / 1000000) * value; 1694 1695 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); 1696 } 1697 1698 /* Set threshold for TX_DONE pkts coalescing */ 1699 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, 1700 struct mvneta_tx_queue *txq, u32 value) 1701 { 1702 u32 val; 1703 1704 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); 1705 1706 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; 1707 val |= MVNETA_TXQ_SENT_THRESH_MASK(value); 1708 1709 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); 1710 } 1711 1712 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ 1713 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, 1714 u32 phys_addr, void *virt_addr, 1715 struct mvneta_rx_queue *rxq) 1716 { 1717 int i; 1718 1719 rx_desc->buf_phys_addr = phys_addr; 1720 i = rx_desc - rxq->descs; 1721 rxq->buf_virt_addr[i] = virt_addr; 1722 } 1723 1724 /* Decrement sent descriptors counter */ 1725 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, 1726 struct mvneta_tx_queue *txq, 1727 int sent_desc) 1728 { 1729 u32 val; 1730 1731 /* Only 255 TX descriptors can be updated at once */ 1732 while (sent_desc > 0xff) { 1733 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; 1734 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1735 sent_desc = sent_desc - 0xff; 1736 } 1737 1738 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; 1739 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1740 } 1741 1742 /* Get number of TX descriptors already sent by HW */ 1743 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, 1744 struct mvneta_tx_queue *txq) 1745 { 1746 u32 val; 1747 int sent_desc; 1748 1749 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); 1750 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> 1751 MVNETA_TXQ_SENT_DESC_SHIFT; 1752 1753 return sent_desc; 1754 } 1755 1756 /* Get number of sent descriptors and decrement counter. 1757 * The number of sent descriptors is returned. 1758 */ 1759 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, 1760 struct mvneta_tx_queue *txq) 1761 { 1762 int sent_desc; 1763 1764 /* Get number of sent descriptors */ 1765 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); 1766 1767 /* Decrement sent descriptors counter */ 1768 if (sent_desc) 1769 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); 1770 1771 return sent_desc; 1772 } 1773 1774 /* Set TXQ descriptors fields relevant for CSUM calculation */ 1775 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, 1776 int ip_hdr_len, int l4_proto) 1777 { 1778 u32 command; 1779 1780 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 1781 * G_L4_chk, L4_type; required only for checksum 1782 * calculation 1783 */ 1784 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1785 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1786 1787 if (l3_proto == htons(ETH_P_IP)) 1788 command |= MVNETA_TXD_IP_CSUM; 1789 else 1790 command |= MVNETA_TX_L3_IP6; 1791 1792 if (l4_proto == IPPROTO_TCP) 1793 command |= MVNETA_TX_L4_CSUM_FULL; 1794 else if (l4_proto == IPPROTO_UDP) 1795 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; 1796 else 1797 command |= MVNETA_TX_L4_CSUM_NOT; 1798 1799 return command; 1800 } 1801 1802 1803 /* Display more error info */ 1804 static void mvneta_rx_error(struct mvneta_port *pp, 1805 struct mvneta_rx_desc *rx_desc) 1806 { 1807 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1808 u32 status = rx_desc->status; 1809 1810 /* update per-cpu counter */ 1811 u64_stats_update_begin(&stats->syncp); 1812 stats->rx_errors++; 1813 u64_stats_update_end(&stats->syncp); 1814 1815 switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1816 case MVNETA_RXD_ERR_CRC: 1817 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", 1818 status, rx_desc->data_size); 1819 break; 1820 case MVNETA_RXD_ERR_OVERRUN: 1821 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", 1822 status, rx_desc->data_size); 1823 break; 1824 case MVNETA_RXD_ERR_LEN: 1825 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", 1826 status, rx_desc->data_size); 1827 break; 1828 case MVNETA_RXD_ERR_RESOURCE: 1829 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", 1830 status, rx_desc->data_size); 1831 break; 1832 } 1833 } 1834 1835 /* Handle RX checksum offload based on the descriptor's status */ 1836 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status) 1837 { 1838 if ((pp->dev->features & NETIF_F_RXCSUM) && 1839 (status & MVNETA_RXD_L3_IP4) && 1840 (status & MVNETA_RXD_L4_CSUM_OK)) 1841 return CHECKSUM_UNNECESSARY; 1842 1843 return CHECKSUM_NONE; 1844 } 1845 1846 /* Return tx queue pointer (find last set bit) according to <cause> returned 1847 * form tx_done reg. <cause> must not be null. The return value is always a 1848 * valid queue for matching the first one found in <cause>. 1849 */ 1850 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1851 u32 cause) 1852 { 1853 int queue = fls(cause) - 1; 1854 1855 return &pp->txqs[queue]; 1856 } 1857 1858 /* Free tx queue skbuffs */ 1859 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1860 struct mvneta_tx_queue *txq, int num, 1861 struct netdev_queue *nq, bool napi) 1862 { 1863 unsigned int bytes_compl = 0, pkts_compl = 0; 1864 struct xdp_frame_bulk bq; 1865 int i; 1866 1867 xdp_frame_bulk_init(&bq); 1868 1869 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 1870 1871 for (i = 0; i < num; i++) { 1872 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; 1873 struct mvneta_tx_desc *tx_desc = txq->descs + 1874 txq->txq_get_index; 1875 1876 mvneta_txq_inc_get(txq); 1877 1878 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && 1879 buf->type != MVNETA_TYPE_XDP_TX) 1880 dma_unmap_single(pp->dev->dev.parent, 1881 tx_desc->buf_phys_addr, 1882 tx_desc->data_size, DMA_TO_DEVICE); 1883 if (buf->type == MVNETA_TYPE_SKB && buf->skb) { 1884 bytes_compl += buf->skb->len; 1885 pkts_compl++; 1886 dev_kfree_skb_any(buf->skb); 1887 } else if (buf->type == MVNETA_TYPE_XDP_TX || 1888 buf->type == MVNETA_TYPE_XDP_NDO) { 1889 if (napi && buf->type == MVNETA_TYPE_XDP_TX) 1890 xdp_return_frame_rx_napi(buf->xdpf); 1891 else 1892 xdp_return_frame_bulk(buf->xdpf, &bq); 1893 } 1894 } 1895 xdp_flush_frame_bulk(&bq); 1896 1897 rcu_read_unlock(); 1898 1899 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); 1900 } 1901 1902 /* Handle end of transmission */ 1903 static void mvneta_txq_done(struct mvneta_port *pp, 1904 struct mvneta_tx_queue *txq) 1905 { 1906 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1907 int tx_done; 1908 1909 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1910 if (!tx_done) 1911 return; 1912 1913 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); 1914 1915 txq->count -= tx_done; 1916 1917 if (netif_tx_queue_stopped(nq)) { 1918 if (txq->count <= txq->tx_wake_threshold) 1919 netif_tx_wake_queue(nq); 1920 } 1921 } 1922 1923 /* Refill processing for SW buffer management */ 1924 /* Allocate page per descriptor */ 1925 static int mvneta_rx_refill(struct mvneta_port *pp, 1926 struct mvneta_rx_desc *rx_desc, 1927 struct mvneta_rx_queue *rxq, 1928 gfp_t gfp_mask) 1929 { 1930 dma_addr_t phys_addr; 1931 struct page *page; 1932 1933 page = page_pool_alloc_pages(rxq->page_pool, 1934 gfp_mask | __GFP_NOWARN); 1935 if (!page) 1936 return -ENOMEM; 1937 1938 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; 1939 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); 1940 1941 return 0; 1942 } 1943 1944 /* Handle tx checksum */ 1945 static u32 mvneta_skb_tx_csum(struct sk_buff *skb) 1946 { 1947 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1948 int ip_hdr_len = 0; 1949 __be16 l3_proto = vlan_get_protocol(skb); 1950 u8 l4_proto; 1951 1952 if (l3_proto == htons(ETH_P_IP)) { 1953 struct iphdr *ip4h = ip_hdr(skb); 1954 1955 /* Calculate IPv4 checksum and L4 checksum */ 1956 ip_hdr_len = ip4h->ihl; 1957 l4_proto = ip4h->protocol; 1958 } else if (l3_proto == htons(ETH_P_IPV6)) { 1959 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1960 1961 /* Read l4_protocol from one of IPv6 extra headers */ 1962 if (skb_network_header_len(skb) > 0) 1963 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1964 l4_proto = ip6h->nexthdr; 1965 } else 1966 return MVNETA_TX_L4_CSUM_NOT; 1967 1968 return mvneta_txq_desc_csum(skb_network_offset(skb), 1969 l3_proto, ip_hdr_len, l4_proto); 1970 } 1971 1972 return MVNETA_TX_L4_CSUM_NOT; 1973 } 1974 1975 /* Drop packets received by the RXQ and free buffers */ 1976 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1977 struct mvneta_rx_queue *rxq) 1978 { 1979 int rx_done, i; 1980 1981 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1982 if (rx_done) 1983 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1984 1985 if (pp->bm_priv) { 1986 for (i = 0; i < rx_done; i++) { 1987 struct mvneta_rx_desc *rx_desc = 1988 mvneta_rxq_next_desc_get(rxq); 1989 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 1990 struct mvneta_bm_pool *bm_pool; 1991 1992 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 1993 /* Return dropped buffer to the pool */ 1994 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 1995 rx_desc->buf_phys_addr); 1996 } 1997 return; 1998 } 1999 2000 for (i = 0; i < rxq->size; i++) { 2001 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 2002 void *data = rxq->buf_virt_addr[i]; 2003 if (!data || !(rx_desc->buf_phys_addr)) 2004 continue; 2005 2006 page_pool_put_full_page(rxq->page_pool, data, false); 2007 } 2008 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 2009 xdp_rxq_info_unreg(&rxq->xdp_rxq); 2010 page_pool_destroy(rxq->page_pool); 2011 rxq->page_pool = NULL; 2012 } 2013 2014 static void 2015 mvneta_update_stats(struct mvneta_port *pp, 2016 struct mvneta_stats *ps) 2017 { 2018 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2019 2020 u64_stats_update_begin(&stats->syncp); 2021 stats->es.ps.rx_packets += ps->rx_packets; 2022 stats->es.ps.rx_bytes += ps->rx_bytes; 2023 /* xdp */ 2024 stats->es.ps.xdp_redirect += ps->xdp_redirect; 2025 stats->es.ps.xdp_pass += ps->xdp_pass; 2026 stats->es.ps.xdp_drop += ps->xdp_drop; 2027 u64_stats_update_end(&stats->syncp); 2028 } 2029 2030 static inline 2031 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) 2032 { 2033 struct mvneta_rx_desc *rx_desc; 2034 int curr_desc = rxq->first_to_refill; 2035 int i; 2036 2037 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { 2038 rx_desc = rxq->descs + curr_desc; 2039 if (!(rx_desc->buf_phys_addr)) { 2040 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { 2041 struct mvneta_pcpu_stats *stats; 2042 2043 pr_err("Can't refill queue %d. Done %d from %d\n", 2044 rxq->id, i, rxq->refill_num); 2045 2046 stats = this_cpu_ptr(pp->stats); 2047 u64_stats_update_begin(&stats->syncp); 2048 stats->es.refill_error++; 2049 u64_stats_update_end(&stats->syncp); 2050 break; 2051 } 2052 } 2053 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); 2054 } 2055 rxq->refill_num -= i; 2056 rxq->first_to_refill = curr_desc; 2057 2058 return i; 2059 } 2060 2061 static void 2062 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2063 struct xdp_buff *xdp, struct skb_shared_info *sinfo, 2064 int sync_len) 2065 { 2066 int i; 2067 2068 for (i = 0; i < sinfo->nr_frags; i++) 2069 page_pool_put_full_page(rxq->page_pool, 2070 skb_frag_page(&sinfo->frags[i]), true); 2071 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), 2072 sync_len, true); 2073 } 2074 2075 static int 2076 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2077 struct xdp_frame *xdpf, bool dma_map) 2078 { 2079 struct mvneta_tx_desc *tx_desc; 2080 struct mvneta_tx_buf *buf; 2081 dma_addr_t dma_addr; 2082 2083 if (txq->count >= txq->tx_stop_threshold) 2084 return MVNETA_XDP_DROPPED; 2085 2086 tx_desc = mvneta_txq_next_desc_get(txq); 2087 2088 buf = &txq->buf[txq->txq_put_index]; 2089 if (dma_map) { 2090 /* ndo_xdp_xmit */ 2091 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, 2092 xdpf->len, DMA_TO_DEVICE); 2093 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { 2094 mvneta_txq_desc_put(txq); 2095 return MVNETA_XDP_DROPPED; 2096 } 2097 buf->type = MVNETA_TYPE_XDP_NDO; 2098 } else { 2099 struct page *page = virt_to_page(xdpf->data); 2100 2101 dma_addr = page_pool_get_dma_addr(page) + 2102 sizeof(*xdpf) + xdpf->headroom; 2103 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, 2104 xdpf->len, DMA_BIDIRECTIONAL); 2105 buf->type = MVNETA_TYPE_XDP_TX; 2106 } 2107 buf->xdpf = xdpf; 2108 2109 tx_desc->command = MVNETA_TXD_FLZ_DESC; 2110 tx_desc->buf_phys_addr = dma_addr; 2111 tx_desc->data_size = xdpf->len; 2112 2113 mvneta_txq_inc_put(txq); 2114 txq->pending++; 2115 txq->count++; 2116 2117 return MVNETA_XDP_TX; 2118 } 2119 2120 static int 2121 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2122 { 2123 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2124 struct mvneta_tx_queue *txq; 2125 struct netdev_queue *nq; 2126 struct xdp_frame *xdpf; 2127 int cpu; 2128 u32 ret; 2129 2130 xdpf = xdp_convert_buff_to_frame(xdp); 2131 if (unlikely(!xdpf)) 2132 return MVNETA_XDP_DROPPED; 2133 2134 cpu = smp_processor_id(); 2135 txq = &pp->txqs[cpu % txq_number]; 2136 nq = netdev_get_tx_queue(pp->dev, txq->id); 2137 2138 __netif_tx_lock(nq, cpu); 2139 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); 2140 if (ret == MVNETA_XDP_TX) { 2141 u64_stats_update_begin(&stats->syncp); 2142 stats->es.ps.tx_bytes += xdpf->len; 2143 stats->es.ps.tx_packets++; 2144 stats->es.ps.xdp_tx++; 2145 u64_stats_update_end(&stats->syncp); 2146 2147 mvneta_txq_pend_desc_add(pp, txq, 0); 2148 } else { 2149 u64_stats_update_begin(&stats->syncp); 2150 stats->es.ps.xdp_tx_err++; 2151 u64_stats_update_end(&stats->syncp); 2152 } 2153 __netif_tx_unlock(nq); 2154 2155 return ret; 2156 } 2157 2158 static int 2159 mvneta_xdp_xmit(struct net_device *dev, int num_frame, 2160 struct xdp_frame **frames, u32 flags) 2161 { 2162 struct mvneta_port *pp = netdev_priv(dev); 2163 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2164 int i, nxmit_byte = 0, nxmit = 0; 2165 int cpu = smp_processor_id(); 2166 struct mvneta_tx_queue *txq; 2167 struct netdev_queue *nq; 2168 u32 ret; 2169 2170 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) 2171 return -ENETDOWN; 2172 2173 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2174 return -EINVAL; 2175 2176 txq = &pp->txqs[cpu % txq_number]; 2177 nq = netdev_get_tx_queue(pp->dev, txq->id); 2178 2179 __netif_tx_lock(nq, cpu); 2180 for (i = 0; i < num_frame; i++) { 2181 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); 2182 if (ret != MVNETA_XDP_TX) 2183 break; 2184 2185 nxmit_byte += frames[i]->len; 2186 nxmit++; 2187 } 2188 2189 if (unlikely(flags & XDP_XMIT_FLUSH)) 2190 mvneta_txq_pend_desc_add(pp, txq, 0); 2191 __netif_tx_unlock(nq); 2192 2193 u64_stats_update_begin(&stats->syncp); 2194 stats->es.ps.tx_bytes += nxmit_byte; 2195 stats->es.ps.tx_packets += nxmit; 2196 stats->es.ps.xdp_xmit += nxmit; 2197 stats->es.ps.xdp_xmit_err += num_frame - nxmit; 2198 u64_stats_update_end(&stats->syncp); 2199 2200 return nxmit; 2201 } 2202 2203 static int 2204 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2205 struct bpf_prog *prog, struct xdp_buff *xdp, 2206 u32 frame_sz, struct mvneta_stats *stats) 2207 { 2208 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2209 unsigned int len, data_len, sync; 2210 u32 ret, act; 2211 2212 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2213 data_len = xdp->data_end - xdp->data; 2214 act = bpf_prog_run_xdp(prog, xdp); 2215 2216 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 2217 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2218 sync = max(sync, len); 2219 2220 switch (act) { 2221 case XDP_PASS: 2222 stats->xdp_pass++; 2223 return MVNETA_XDP_PASS; 2224 case XDP_REDIRECT: { 2225 int err; 2226 2227 err = xdp_do_redirect(pp->dev, xdp, prog); 2228 if (unlikely(err)) { 2229 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2230 ret = MVNETA_XDP_DROPPED; 2231 } else { 2232 ret = MVNETA_XDP_REDIR; 2233 stats->xdp_redirect++; 2234 } 2235 break; 2236 } 2237 case XDP_TX: 2238 ret = mvneta_xdp_xmit_back(pp, xdp); 2239 if (ret != MVNETA_XDP_TX) 2240 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2241 break; 2242 default: 2243 bpf_warn_invalid_xdp_action(pp->dev, prog, act); 2244 fallthrough; 2245 case XDP_ABORTED: 2246 trace_xdp_exception(pp->dev, prog, act); 2247 fallthrough; 2248 case XDP_DROP: 2249 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2250 ret = MVNETA_XDP_DROPPED; 2251 stats->xdp_drop++; 2252 break; 2253 } 2254 2255 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; 2256 stats->rx_packets++; 2257 2258 return ret; 2259 } 2260 2261 static void 2262 mvneta_swbm_rx_frame(struct mvneta_port *pp, 2263 struct mvneta_rx_desc *rx_desc, 2264 struct mvneta_rx_queue *rxq, 2265 struct xdp_buff *xdp, int *size, 2266 struct page *page) 2267 { 2268 unsigned char *data = page_address(page); 2269 int data_len = -MVNETA_MH_SIZE, len; 2270 struct net_device *dev = pp->dev; 2271 enum dma_data_direction dma_dir; 2272 struct skb_shared_info *sinfo; 2273 2274 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2275 len = MVNETA_MAX_RX_BUF_SIZE; 2276 data_len += len; 2277 } else { 2278 len = *size; 2279 data_len += len - ETH_FCS_LEN; 2280 } 2281 *size = *size - len; 2282 2283 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2284 dma_sync_single_for_cpu(dev->dev.parent, 2285 rx_desc->buf_phys_addr, 2286 len, dma_dir); 2287 2288 rx_desc->buf_phys_addr = 0; 2289 2290 /* Prefetch header */ 2291 prefetch(data); 2292 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, 2293 data_len, false); 2294 2295 sinfo = xdp_get_shared_info_from_buff(xdp); 2296 sinfo->nr_frags = 0; 2297 } 2298 2299 static void 2300 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, 2301 struct mvneta_rx_desc *rx_desc, 2302 struct mvneta_rx_queue *rxq, 2303 struct xdp_buff *xdp, int *size, 2304 struct skb_shared_info *xdp_sinfo, 2305 struct page *page) 2306 { 2307 struct net_device *dev = pp->dev; 2308 enum dma_data_direction dma_dir; 2309 int data_len, len; 2310 2311 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2312 len = MVNETA_MAX_RX_BUF_SIZE; 2313 data_len = len; 2314 } else { 2315 len = *size; 2316 data_len = len - ETH_FCS_LEN; 2317 } 2318 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2319 dma_sync_single_for_cpu(dev->dev.parent, 2320 rx_desc->buf_phys_addr, 2321 len, dma_dir); 2322 rx_desc->buf_phys_addr = 0; 2323 2324 if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { 2325 skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; 2326 2327 skb_frag_off_set(frag, pp->rx_offset_correction); 2328 skb_frag_size_set(frag, data_len); 2329 __skb_frag_set_page(frag, page); 2330 } else { 2331 page_pool_put_full_page(rxq->page_pool, page, true); 2332 } 2333 2334 /* last fragment */ 2335 if (len == *size) { 2336 struct skb_shared_info *sinfo; 2337 2338 sinfo = xdp_get_shared_info_from_buff(xdp); 2339 sinfo->nr_frags = xdp_sinfo->nr_frags; 2340 memcpy(sinfo->frags, xdp_sinfo->frags, 2341 sinfo->nr_frags * sizeof(skb_frag_t)); 2342 } 2343 *size -= len; 2344 } 2345 2346 static struct sk_buff * 2347 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, 2348 struct xdp_buff *xdp, u32 desc_status) 2349 { 2350 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2351 int i, num_frags = sinfo->nr_frags; 2352 struct sk_buff *skb; 2353 2354 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2355 if (!skb) 2356 return ERR_PTR(-ENOMEM); 2357 2358 skb_mark_for_recycle(skb); 2359 2360 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2361 skb_put(skb, xdp->data_end - xdp->data); 2362 skb->ip_summed = mvneta_rx_csum(pp, desc_status); 2363 2364 for (i = 0; i < num_frags; i++) { 2365 skb_frag_t *frag = &sinfo->frags[i]; 2366 2367 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2368 skb_frag_page(frag), skb_frag_off(frag), 2369 skb_frag_size(frag), PAGE_SIZE); 2370 } 2371 2372 return skb; 2373 } 2374 2375 /* Main rx processing when using software buffer management */ 2376 static int mvneta_rx_swbm(struct napi_struct *napi, 2377 struct mvneta_port *pp, int budget, 2378 struct mvneta_rx_queue *rxq) 2379 { 2380 int rx_proc = 0, rx_todo, refill, size = 0; 2381 struct net_device *dev = pp->dev; 2382 struct skb_shared_info sinfo; 2383 struct mvneta_stats ps = {}; 2384 struct bpf_prog *xdp_prog; 2385 u32 desc_status, frame_sz; 2386 struct xdp_buff xdp_buf; 2387 2388 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); 2389 xdp_buf.data_hard_start = NULL; 2390 2391 sinfo.nr_frags = 0; 2392 2393 /* Get number of received packets */ 2394 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); 2395 2396 xdp_prog = READ_ONCE(pp->xdp_prog); 2397 2398 /* Fairness NAPI loop */ 2399 while (rx_proc < budget && rx_proc < rx_todo) { 2400 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2401 u32 rx_status, index; 2402 struct sk_buff *skb; 2403 struct page *page; 2404 2405 index = rx_desc - rxq->descs; 2406 page = (struct page *)rxq->buf_virt_addr[index]; 2407 2408 rx_status = rx_desc->status; 2409 rx_proc++; 2410 rxq->refill_num++; 2411 2412 if (rx_status & MVNETA_RXD_FIRST_DESC) { 2413 /* Check errors only for FIRST descriptor */ 2414 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2415 mvneta_rx_error(pp, rx_desc); 2416 goto next; 2417 } 2418 2419 size = rx_desc->data_size; 2420 frame_sz = size - ETH_FCS_LEN; 2421 desc_status = rx_status; 2422 2423 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, 2424 &size, page); 2425 } else { 2426 if (unlikely(!xdp_buf.data_hard_start)) { 2427 rx_desc->buf_phys_addr = 0; 2428 page_pool_put_full_page(rxq->page_pool, page, 2429 true); 2430 goto next; 2431 } 2432 2433 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, 2434 &size, &sinfo, page); 2435 } /* Middle or Last descriptor */ 2436 2437 if (!(rx_status & MVNETA_RXD_LAST_DESC)) 2438 /* no last descriptor this time */ 2439 continue; 2440 2441 if (size) { 2442 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2443 goto next; 2444 } 2445 2446 if (xdp_prog && 2447 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) 2448 goto next; 2449 2450 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); 2451 if (IS_ERR(skb)) { 2452 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2453 2454 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2455 2456 u64_stats_update_begin(&stats->syncp); 2457 stats->es.skb_alloc_error++; 2458 stats->rx_dropped++; 2459 u64_stats_update_end(&stats->syncp); 2460 2461 goto next; 2462 } 2463 2464 ps.rx_bytes += skb->len; 2465 ps.rx_packets++; 2466 2467 skb->protocol = eth_type_trans(skb, dev); 2468 napi_gro_receive(napi, skb); 2469 next: 2470 xdp_buf.data_hard_start = NULL; 2471 sinfo.nr_frags = 0; 2472 } 2473 2474 if (xdp_buf.data_hard_start) 2475 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2476 2477 if (ps.xdp_redirect) 2478 xdp_do_flush_map(); 2479 2480 if (ps.rx_packets) 2481 mvneta_update_stats(pp, &ps); 2482 2483 /* return some buffers to hardware queue, one at a time is too slow */ 2484 refill = mvneta_rx_refill_queue(pp, rxq); 2485 2486 /* Update rxq management counters */ 2487 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); 2488 2489 return ps.rx_packets; 2490 } 2491 2492 /* Main rx processing when using hardware buffer management */ 2493 static int mvneta_rx_hwbm(struct napi_struct *napi, 2494 struct mvneta_port *pp, int rx_todo, 2495 struct mvneta_rx_queue *rxq) 2496 { 2497 struct net_device *dev = pp->dev; 2498 int rx_done; 2499 u32 rcvd_pkts = 0; 2500 u32 rcvd_bytes = 0; 2501 2502 /* Get number of received packets */ 2503 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 2504 2505 if (rx_todo > rx_done) 2506 rx_todo = rx_done; 2507 2508 rx_done = 0; 2509 2510 /* Fairness NAPI loop */ 2511 while (rx_done < rx_todo) { 2512 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2513 struct mvneta_bm_pool *bm_pool = NULL; 2514 struct sk_buff *skb; 2515 unsigned char *data; 2516 dma_addr_t phys_addr; 2517 u32 rx_status, frag_size; 2518 int rx_bytes, err; 2519 u8 pool_id; 2520 2521 rx_done++; 2522 rx_status = rx_desc->status; 2523 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 2524 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; 2525 phys_addr = rx_desc->buf_phys_addr; 2526 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2527 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2528 2529 if (!mvneta_rxq_desc_is_first_last(rx_status) || 2530 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 2531 err_drop_frame_ret_pool: 2532 /* Return the buffer to the pool */ 2533 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2534 rx_desc->buf_phys_addr); 2535 err_drop_frame: 2536 mvneta_rx_error(pp, rx_desc); 2537 /* leave the descriptor untouched */ 2538 continue; 2539 } 2540 2541 if (rx_bytes <= rx_copybreak) { 2542 /* better copy a small frame and not unmap the DMA region */ 2543 skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 2544 if (unlikely(!skb)) 2545 goto err_drop_frame_ret_pool; 2546 2547 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, 2548 rx_desc->buf_phys_addr, 2549 MVNETA_MH_SIZE + NET_SKB_PAD, 2550 rx_bytes, 2551 DMA_FROM_DEVICE); 2552 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, 2553 rx_bytes); 2554 2555 skb->protocol = eth_type_trans(skb, dev); 2556 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2557 napi_gro_receive(napi, skb); 2558 2559 rcvd_pkts++; 2560 rcvd_bytes += rx_bytes; 2561 2562 /* Return the buffer to the pool */ 2563 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2564 rx_desc->buf_phys_addr); 2565 2566 /* leave the descriptor and buffer untouched */ 2567 continue; 2568 } 2569 2570 /* Refill processing */ 2571 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); 2572 if (err) { 2573 struct mvneta_pcpu_stats *stats; 2574 2575 netdev_err(dev, "Linux processing - Can't refill\n"); 2576 2577 stats = this_cpu_ptr(pp->stats); 2578 u64_stats_update_begin(&stats->syncp); 2579 stats->es.refill_error++; 2580 u64_stats_update_end(&stats->syncp); 2581 2582 goto err_drop_frame_ret_pool; 2583 } 2584 2585 frag_size = bm_pool->hwbm_pool.frag_size; 2586 2587 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); 2588 2589 /* After refill old buffer has to be unmapped regardless 2590 * the skb is successfully built or not. 2591 */ 2592 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, 2593 bm_pool->buf_size, DMA_FROM_DEVICE); 2594 if (!skb) 2595 goto err_drop_frame; 2596 2597 rcvd_pkts++; 2598 rcvd_bytes += rx_bytes; 2599 2600 /* Linux processing */ 2601 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 2602 skb_put(skb, rx_bytes); 2603 2604 skb->protocol = eth_type_trans(skb, dev); 2605 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2606 2607 napi_gro_receive(napi, skb); 2608 } 2609 2610 if (rcvd_pkts) { 2611 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2612 2613 u64_stats_update_begin(&stats->syncp); 2614 stats->es.ps.rx_packets += rcvd_pkts; 2615 stats->es.ps.rx_bytes += rcvd_bytes; 2616 u64_stats_update_end(&stats->syncp); 2617 } 2618 2619 /* Update rxq management counters */ 2620 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 2621 2622 return rx_done; 2623 } 2624 2625 static inline void 2626 mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq) 2627 { 2628 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2629 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2630 struct mvneta_tx_desc *tx_desc; 2631 2632 tx_desc = mvneta_txq_next_desc_get(txq); 2633 tx_desc->data_size = hdr_len; 2634 tx_desc->command = mvneta_skb_tx_csum(skb); 2635 tx_desc->command |= MVNETA_TXD_F_DESC; 2636 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + 2637 txq->txq_put_index * TSO_HEADER_SIZE; 2638 buf->type = MVNETA_TYPE_SKB; 2639 buf->skb = NULL; 2640 2641 mvneta_txq_inc_put(txq); 2642 } 2643 2644 static inline int 2645 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 2646 struct sk_buff *skb, char *data, int size, 2647 bool last_tcp, bool is_last) 2648 { 2649 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2650 struct mvneta_tx_desc *tx_desc; 2651 2652 tx_desc = mvneta_txq_next_desc_get(txq); 2653 tx_desc->data_size = size; 2654 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, 2655 size, DMA_TO_DEVICE); 2656 if (unlikely(dma_mapping_error(dev->dev.parent, 2657 tx_desc->buf_phys_addr))) { 2658 mvneta_txq_desc_put(txq); 2659 return -ENOMEM; 2660 } 2661 2662 tx_desc->command = 0; 2663 buf->type = MVNETA_TYPE_SKB; 2664 buf->skb = NULL; 2665 2666 if (last_tcp) { 2667 /* last descriptor in the TCP packet */ 2668 tx_desc->command = MVNETA_TXD_L_DESC; 2669 2670 /* last descriptor in SKB */ 2671 if (is_last) 2672 buf->skb = skb; 2673 } 2674 mvneta_txq_inc_put(txq); 2675 return 0; 2676 } 2677 2678 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 2679 struct mvneta_tx_queue *txq) 2680 { 2681 int hdr_len, total_len, data_left; 2682 int desc_count = 0; 2683 struct mvneta_port *pp = netdev_priv(dev); 2684 struct tso_t tso; 2685 int i; 2686 2687 /* Count needed descriptors */ 2688 if ((txq->count + tso_count_descs(skb)) >= txq->size) 2689 return 0; 2690 2691 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { 2692 pr_info("*** Is this even possible?\n"); 2693 return 0; 2694 } 2695 2696 /* Initialize the TSO handler, and prepare the first payload */ 2697 hdr_len = tso_start(skb, &tso); 2698 2699 total_len = skb->len - hdr_len; 2700 while (total_len > 0) { 2701 char *hdr; 2702 2703 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 2704 total_len -= data_left; 2705 desc_count++; 2706 2707 /* prepare packet headers: MAC + IP + TCP */ 2708 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; 2709 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 2710 2711 mvneta_tso_put_hdr(skb, txq); 2712 2713 while (data_left > 0) { 2714 int size; 2715 desc_count++; 2716 2717 size = min_t(int, tso.size, data_left); 2718 2719 if (mvneta_tso_put_data(dev, txq, skb, 2720 tso.data, size, 2721 size == data_left, 2722 total_len == 0)) 2723 goto err_release; 2724 data_left -= size; 2725 2726 tso_build_data(skb, &tso, size); 2727 } 2728 } 2729 2730 return desc_count; 2731 2732 err_release: 2733 /* Release all used data descriptors; header descriptors must not 2734 * be DMA-unmapped. 2735 */ 2736 for (i = desc_count - 1; i >= 0; i--) { 2737 struct mvneta_tx_desc *tx_desc = txq->descs + i; 2738 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) 2739 dma_unmap_single(pp->dev->dev.parent, 2740 tx_desc->buf_phys_addr, 2741 tx_desc->data_size, 2742 DMA_TO_DEVICE); 2743 mvneta_txq_desc_put(txq); 2744 } 2745 return 0; 2746 } 2747 2748 /* Handle tx fragmentation processing */ 2749 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 2750 struct mvneta_tx_queue *txq) 2751 { 2752 struct mvneta_tx_desc *tx_desc; 2753 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2754 2755 for (i = 0; i < nr_frags; i++) { 2756 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2757 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2758 void *addr = skb_frag_address(frag); 2759 2760 tx_desc = mvneta_txq_next_desc_get(txq); 2761 tx_desc->data_size = skb_frag_size(frag); 2762 2763 tx_desc->buf_phys_addr = 2764 dma_map_single(pp->dev->dev.parent, addr, 2765 tx_desc->data_size, DMA_TO_DEVICE); 2766 2767 if (dma_mapping_error(pp->dev->dev.parent, 2768 tx_desc->buf_phys_addr)) { 2769 mvneta_txq_desc_put(txq); 2770 goto error; 2771 } 2772 2773 if (i == nr_frags - 1) { 2774 /* Last descriptor */ 2775 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2776 buf->skb = skb; 2777 } else { 2778 /* Descriptor in the middle: Not First, Not Last */ 2779 tx_desc->command = 0; 2780 buf->skb = NULL; 2781 } 2782 buf->type = MVNETA_TYPE_SKB; 2783 mvneta_txq_inc_put(txq); 2784 } 2785 2786 return 0; 2787 2788 error: 2789 /* Release all descriptors that were used to map fragments of 2790 * this packet, as well as the corresponding DMA mappings 2791 */ 2792 for (i = i - 1; i >= 0; i--) { 2793 tx_desc = txq->descs + i; 2794 dma_unmap_single(pp->dev->dev.parent, 2795 tx_desc->buf_phys_addr, 2796 tx_desc->data_size, 2797 DMA_TO_DEVICE); 2798 mvneta_txq_desc_put(txq); 2799 } 2800 2801 return -ENOMEM; 2802 } 2803 2804 /* Main tx processing */ 2805 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) 2806 { 2807 struct mvneta_port *pp = netdev_priv(dev); 2808 u16 txq_id = skb_get_queue_mapping(skb); 2809 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 2810 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2811 struct mvneta_tx_desc *tx_desc; 2812 int len = skb->len; 2813 int frags = 0; 2814 u32 tx_cmd; 2815 2816 if (!netif_running(dev)) 2817 goto out; 2818 2819 if (skb_is_gso(skb)) { 2820 frags = mvneta_tx_tso(skb, dev, txq); 2821 goto out; 2822 } 2823 2824 frags = skb_shinfo(skb)->nr_frags + 1; 2825 2826 /* Get a descriptor for the first part of the packet */ 2827 tx_desc = mvneta_txq_next_desc_get(txq); 2828 2829 tx_cmd = mvneta_skb_tx_csum(skb); 2830 2831 tx_desc->data_size = skb_headlen(skb); 2832 2833 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 2834 tx_desc->data_size, 2835 DMA_TO_DEVICE); 2836 if (unlikely(dma_mapping_error(dev->dev.parent, 2837 tx_desc->buf_phys_addr))) { 2838 mvneta_txq_desc_put(txq); 2839 frags = 0; 2840 goto out; 2841 } 2842 2843 buf->type = MVNETA_TYPE_SKB; 2844 if (frags == 1) { 2845 /* First and Last descriptor */ 2846 tx_cmd |= MVNETA_TXD_FLZ_DESC; 2847 tx_desc->command = tx_cmd; 2848 buf->skb = skb; 2849 mvneta_txq_inc_put(txq); 2850 } else { 2851 /* First but not Last */ 2852 tx_cmd |= MVNETA_TXD_F_DESC; 2853 buf->skb = NULL; 2854 mvneta_txq_inc_put(txq); 2855 tx_desc->command = tx_cmd; 2856 /* Continue with other skb fragments */ 2857 if (mvneta_tx_frag_process(pp, skb, txq)) { 2858 dma_unmap_single(dev->dev.parent, 2859 tx_desc->buf_phys_addr, 2860 tx_desc->data_size, 2861 DMA_TO_DEVICE); 2862 mvneta_txq_desc_put(txq); 2863 frags = 0; 2864 goto out; 2865 } 2866 } 2867 2868 out: 2869 if (frags > 0) { 2870 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 2871 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2872 2873 netdev_tx_sent_queue(nq, len); 2874 2875 txq->count += frags; 2876 if (txq->count >= txq->tx_stop_threshold) 2877 netif_tx_stop_queue(nq); 2878 2879 if (!netdev_xmit_more() || netif_xmit_stopped(nq) || 2880 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) 2881 mvneta_txq_pend_desc_add(pp, txq, frags); 2882 else 2883 txq->pending += frags; 2884 2885 u64_stats_update_begin(&stats->syncp); 2886 stats->es.ps.tx_bytes += len; 2887 stats->es.ps.tx_packets++; 2888 u64_stats_update_end(&stats->syncp); 2889 } else { 2890 dev->stats.tx_dropped++; 2891 dev_kfree_skb_any(skb); 2892 } 2893 2894 return NETDEV_TX_OK; 2895 } 2896 2897 2898 /* Free tx resources, when resetting a port */ 2899 static void mvneta_txq_done_force(struct mvneta_port *pp, 2900 struct mvneta_tx_queue *txq) 2901 2902 { 2903 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 2904 int tx_done = txq->count; 2905 2906 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); 2907 2908 /* reset txq */ 2909 txq->count = 0; 2910 txq->txq_put_index = 0; 2911 txq->txq_get_index = 0; 2912 } 2913 2914 /* Handle tx done - called in softirq context. The <cause_tx_done> argument 2915 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 2916 */ 2917 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 2918 { 2919 struct mvneta_tx_queue *txq; 2920 struct netdev_queue *nq; 2921 int cpu = smp_processor_id(); 2922 2923 while (cause_tx_done) { 2924 txq = mvneta_tx_done_policy(pp, cause_tx_done); 2925 2926 nq = netdev_get_tx_queue(pp->dev, txq->id); 2927 __netif_tx_lock(nq, cpu); 2928 2929 if (txq->count) 2930 mvneta_txq_done(pp, txq); 2931 2932 __netif_tx_unlock(nq); 2933 cause_tx_done &= ~((1 << txq->id)); 2934 } 2935 } 2936 2937 /* Compute crc8 of the specified address, using a unique algorithm , 2938 * according to hw spec, different than generic crc8 algorithm 2939 */ 2940 static int mvneta_addr_crc(unsigned char *addr) 2941 { 2942 int crc = 0; 2943 int i; 2944 2945 for (i = 0; i < ETH_ALEN; i++) { 2946 int j; 2947 2948 crc = (crc ^ addr[i]) << 8; 2949 for (j = 7; j >= 0; j--) { 2950 if (crc & (0x100 << j)) 2951 crc ^= 0x107 << j; 2952 } 2953 } 2954 2955 return crc; 2956 } 2957 2958 /* This method controls the net device special MAC multicast support. 2959 * The Special Multicast Table for MAC addresses supports MAC of the form 2960 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 2961 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 2962 * Table entries in the DA-Filter table. This method set the Special 2963 * Multicast Table appropriate entry. 2964 */ 2965 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 2966 unsigned char last_byte, 2967 int queue) 2968 { 2969 unsigned int smc_table_reg; 2970 unsigned int tbl_offset; 2971 unsigned int reg_offset; 2972 2973 /* Register offset from SMC table base */ 2974 tbl_offset = (last_byte / 4); 2975 /* Entry offset within the above reg */ 2976 reg_offset = last_byte % 4; 2977 2978 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 2979 + tbl_offset * 4)); 2980 2981 if (queue == -1) 2982 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2983 else { 2984 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2985 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 2986 } 2987 2988 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 2989 smc_table_reg); 2990 } 2991 2992 /* This method controls the network device Other MAC multicast support. 2993 * The Other Multicast Table is used for multicast of another type. 2994 * A CRC-8 is used as an index to the Other Multicast Table entries 2995 * in the DA-Filter table. 2996 * The method gets the CRC-8 value from the calling routine and 2997 * sets the Other Multicast Table appropriate entry according to the 2998 * specified CRC-8 . 2999 */ 3000 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 3001 unsigned char crc8, 3002 int queue) 3003 { 3004 unsigned int omc_table_reg; 3005 unsigned int tbl_offset; 3006 unsigned int reg_offset; 3007 3008 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 3009 reg_offset = crc8 % 4; /* Entry offset within the above reg */ 3010 3011 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 3012 3013 if (queue == -1) { 3014 /* Clear accepts frame bit at specified Other DA table entry */ 3015 omc_table_reg &= ~(0xff << (8 * reg_offset)); 3016 } else { 3017 omc_table_reg &= ~(0xff << (8 * reg_offset)); 3018 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 3019 } 3020 3021 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 3022 } 3023 3024 /* The network device supports multicast using two tables: 3025 * 1) Special Multicast Table for MAC addresses of the form 3026 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 3027 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3028 * Table entries in the DA-Filter table. 3029 * 2) Other Multicast Table for multicast of another type. A CRC-8 value 3030 * is used as an index to the Other Multicast Table entries in the 3031 * DA-Filter table. 3032 */ 3033 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, 3034 int queue) 3035 { 3036 unsigned char crc_result = 0; 3037 3038 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { 3039 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); 3040 return 0; 3041 } 3042 3043 crc_result = mvneta_addr_crc(p_addr); 3044 if (queue == -1) { 3045 if (pp->mcast_count[crc_result] == 0) { 3046 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", 3047 crc_result); 3048 return -EINVAL; 3049 } 3050 3051 pp->mcast_count[crc_result]--; 3052 if (pp->mcast_count[crc_result] != 0) { 3053 netdev_info(pp->dev, 3054 "After delete there are %d valid Mcast for crc8=0x%02x\n", 3055 pp->mcast_count[crc_result], crc_result); 3056 return -EINVAL; 3057 } 3058 } else 3059 pp->mcast_count[crc_result]++; 3060 3061 mvneta_set_other_mcast_addr(pp, crc_result, queue); 3062 3063 return 0; 3064 } 3065 3066 /* Configure Fitering mode of Ethernet port */ 3067 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, 3068 int is_promisc) 3069 { 3070 u32 port_cfg_reg, val; 3071 3072 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); 3073 3074 val = mvreg_read(pp, MVNETA_TYPE_PRIO); 3075 3076 /* Set / Clear UPM bit in port configuration register */ 3077 if (is_promisc) { 3078 /* Accept all Unicast addresses */ 3079 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; 3080 val |= MVNETA_FORCE_UNI; 3081 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); 3082 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); 3083 } else { 3084 /* Reject all Unicast addresses */ 3085 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; 3086 val &= ~MVNETA_FORCE_UNI; 3087 } 3088 3089 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); 3090 mvreg_write(pp, MVNETA_TYPE_PRIO, val); 3091 } 3092 3093 /* register unicast and multicast addresses */ 3094 static void mvneta_set_rx_mode(struct net_device *dev) 3095 { 3096 struct mvneta_port *pp = netdev_priv(dev); 3097 struct netdev_hw_addr *ha; 3098 3099 if (dev->flags & IFF_PROMISC) { 3100 /* Accept all: Multicast + Unicast */ 3101 mvneta_rx_unicast_promisc_set(pp, 1); 3102 mvneta_set_ucast_table(pp, pp->rxq_def); 3103 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3104 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3105 } else { 3106 /* Accept single Unicast */ 3107 mvneta_rx_unicast_promisc_set(pp, 0); 3108 mvneta_set_ucast_table(pp, -1); 3109 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); 3110 3111 if (dev->flags & IFF_ALLMULTI) { 3112 /* Accept all multicast */ 3113 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3114 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3115 } else { 3116 /* Accept only initialized multicast */ 3117 mvneta_set_special_mcast_table(pp, -1); 3118 mvneta_set_other_mcast_table(pp, -1); 3119 3120 if (!netdev_mc_empty(dev)) { 3121 netdev_for_each_mc_addr(ha, dev) { 3122 mvneta_mcast_addr_set(pp, ha->addr, 3123 pp->rxq_def); 3124 } 3125 } 3126 } 3127 } 3128 } 3129 3130 /* Interrupt handling - the callback for request_irq() */ 3131 static irqreturn_t mvneta_isr(int irq, void *dev_id) 3132 { 3133 struct mvneta_port *pp = (struct mvneta_port *)dev_id; 3134 3135 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 3136 napi_schedule(&pp->napi); 3137 3138 return IRQ_HANDLED; 3139 } 3140 3141 /* Interrupt handling - the callback for request_percpu_irq() */ 3142 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) 3143 { 3144 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; 3145 3146 disable_percpu_irq(port->pp->dev->irq); 3147 napi_schedule(&port->napi); 3148 3149 return IRQ_HANDLED; 3150 } 3151 3152 static void mvneta_link_change(struct mvneta_port *pp) 3153 { 3154 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3155 3156 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); 3157 } 3158 3159 /* NAPI handler 3160 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted 3161 * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 3162 * Bits 8 -15 of the cause Rx Tx register indicate that are received 3163 * packets on the corresponding RXQ (Bit 8 is for RX queue 0). 3164 * Each CPU has its own causeRxTx register 3165 */ 3166 static int mvneta_poll(struct napi_struct *napi, int budget) 3167 { 3168 int rx_done = 0; 3169 u32 cause_rx_tx; 3170 int rx_queue; 3171 struct mvneta_port *pp = netdev_priv(napi->dev); 3172 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 3173 3174 if (!netif_running(pp->dev)) { 3175 napi_complete(napi); 3176 return rx_done; 3177 } 3178 3179 /* Read cause register */ 3180 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 3181 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 3182 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 3183 3184 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 3185 3186 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | 3187 MVNETA_CAUSE_LINK_CHANGE)) 3188 mvneta_link_change(pp); 3189 } 3190 3191 /* Release Tx descriptors */ 3192 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 3193 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 3194 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 3195 } 3196 3197 /* For the case where the last mvneta_poll did not process all 3198 * RX packets 3199 */ 3200 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : 3201 port->cause_rx_tx; 3202 3203 rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 3204 if (rx_queue) { 3205 rx_queue = rx_queue - 1; 3206 if (pp->bm_priv) 3207 rx_done = mvneta_rx_hwbm(napi, pp, budget, 3208 &pp->rxqs[rx_queue]); 3209 else 3210 rx_done = mvneta_rx_swbm(napi, pp, budget, 3211 &pp->rxqs[rx_queue]); 3212 } 3213 3214 if (rx_done < budget) { 3215 cause_rx_tx = 0; 3216 napi_complete_done(napi, rx_done); 3217 3218 if (pp->neta_armada3700) { 3219 unsigned long flags; 3220 3221 local_irq_save(flags); 3222 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 3223 MVNETA_RX_INTR_MASK(rxq_number) | 3224 MVNETA_TX_INTR_MASK(txq_number) | 3225 MVNETA_MISCINTR_INTR_MASK); 3226 local_irq_restore(flags); 3227 } else { 3228 enable_percpu_irq(pp->dev->irq, 0); 3229 } 3230 } 3231 3232 if (pp->neta_armada3700) 3233 pp->cause_rx_tx = cause_rx_tx; 3234 else 3235 port->cause_rx_tx = cause_rx_tx; 3236 3237 return rx_done; 3238 } 3239 3240 static int mvneta_create_page_pool(struct mvneta_port *pp, 3241 struct mvneta_rx_queue *rxq, int size) 3242 { 3243 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); 3244 struct page_pool_params pp_params = { 3245 .order = 0, 3246 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 3247 .pool_size = size, 3248 .nid = NUMA_NO_NODE, 3249 .dev = pp->dev->dev.parent, 3250 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 3251 .offset = pp->rx_offset_correction, 3252 .max_len = MVNETA_MAX_RX_BUF_SIZE, 3253 }; 3254 int err; 3255 3256 rxq->page_pool = page_pool_create(&pp_params); 3257 if (IS_ERR(rxq->page_pool)) { 3258 err = PTR_ERR(rxq->page_pool); 3259 rxq->page_pool = NULL; 3260 return err; 3261 } 3262 3263 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); 3264 if (err < 0) 3265 goto err_free_pp; 3266 3267 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 3268 rxq->page_pool); 3269 if (err) 3270 goto err_unregister_rxq; 3271 3272 return 0; 3273 3274 err_unregister_rxq: 3275 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3276 err_free_pp: 3277 page_pool_destroy(rxq->page_pool); 3278 rxq->page_pool = NULL; 3279 return err; 3280 } 3281 3282 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 3283 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 3284 int num) 3285 { 3286 int i, err; 3287 3288 err = mvneta_create_page_pool(pp, rxq, num); 3289 if (err < 0) 3290 return err; 3291 3292 for (i = 0; i < num; i++) { 3293 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 3294 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, 3295 GFP_KERNEL) != 0) { 3296 netdev_err(pp->dev, 3297 "%s:rxq %d, %d of %d buffs filled\n", 3298 __func__, rxq->id, i, num); 3299 break; 3300 } 3301 } 3302 3303 /* Add this number of RX descriptors as non occupied (ready to 3304 * get packets) 3305 */ 3306 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 3307 3308 return i; 3309 } 3310 3311 /* Free all packets pending transmit from all TXQs and reset TX port */ 3312 static void mvneta_tx_reset(struct mvneta_port *pp) 3313 { 3314 int queue; 3315 3316 /* free the skb's in the tx ring */ 3317 for (queue = 0; queue < txq_number; queue++) 3318 mvneta_txq_done_force(pp, &pp->txqs[queue]); 3319 3320 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 3321 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 3322 } 3323 3324 static void mvneta_rx_reset(struct mvneta_port *pp) 3325 { 3326 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 3327 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 3328 } 3329 3330 /* Rx/Tx queue initialization/cleanup methods */ 3331 3332 static int mvneta_rxq_sw_init(struct mvneta_port *pp, 3333 struct mvneta_rx_queue *rxq) 3334 { 3335 rxq->size = pp->rx_ring_size; 3336 3337 /* Allocate memory for RX descriptors */ 3338 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3339 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3340 &rxq->descs_phys, GFP_KERNEL); 3341 if (!rxq->descs) 3342 return -ENOMEM; 3343 3344 rxq->last_desc = rxq->size - 1; 3345 3346 return 0; 3347 } 3348 3349 static void mvneta_rxq_hw_init(struct mvneta_port *pp, 3350 struct mvneta_rx_queue *rxq) 3351 { 3352 /* Set Rx descriptors queue starting address */ 3353 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 3354 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 3355 3356 /* Set coalescing pkts and time */ 3357 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 3358 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 3359 3360 if (!pp->bm_priv) { 3361 /* Set Offset */ 3362 mvneta_rxq_offset_set(pp, rxq, 0); 3363 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 3364 MVNETA_MAX_RX_BUF_SIZE : 3365 MVNETA_RX_BUF_SIZE(pp->pkt_size)); 3366 mvneta_rxq_bm_disable(pp, rxq); 3367 mvneta_rxq_fill(pp, rxq, rxq->size); 3368 } else { 3369 /* Set Offset */ 3370 mvneta_rxq_offset_set(pp, rxq, 3371 NET_SKB_PAD - pp->rx_offset_correction); 3372 3373 mvneta_rxq_bm_enable(pp, rxq); 3374 /* Fill RXQ with buffers from RX pool */ 3375 mvneta_rxq_long_pool_set(pp, rxq); 3376 mvneta_rxq_short_pool_set(pp, rxq); 3377 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); 3378 } 3379 } 3380 3381 /* Create a specified RX queue */ 3382 static int mvneta_rxq_init(struct mvneta_port *pp, 3383 struct mvneta_rx_queue *rxq) 3384 3385 { 3386 int ret; 3387 3388 ret = mvneta_rxq_sw_init(pp, rxq); 3389 if (ret < 0) 3390 return ret; 3391 3392 mvneta_rxq_hw_init(pp, rxq); 3393 3394 return 0; 3395 } 3396 3397 /* Cleanup Rx queue */ 3398 static void mvneta_rxq_deinit(struct mvneta_port *pp, 3399 struct mvneta_rx_queue *rxq) 3400 { 3401 mvneta_rxq_drop_pkts(pp, rxq); 3402 3403 if (rxq->descs) 3404 dma_free_coherent(pp->dev->dev.parent, 3405 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3406 rxq->descs, 3407 rxq->descs_phys); 3408 3409 rxq->descs = NULL; 3410 rxq->last_desc = 0; 3411 rxq->next_desc_to_proc = 0; 3412 rxq->descs_phys = 0; 3413 rxq->first_to_refill = 0; 3414 rxq->refill_num = 0; 3415 } 3416 3417 static int mvneta_txq_sw_init(struct mvneta_port *pp, 3418 struct mvneta_tx_queue *txq) 3419 { 3420 int cpu; 3421 3422 txq->size = pp->tx_ring_size; 3423 3424 /* A queue must always have room for at least one skb. 3425 * Therefore, stop the queue when the free entries reaches 3426 * the maximum number of descriptors per skb. 3427 */ 3428 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 3429 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 3430 3431 /* Allocate memory for TX descriptors */ 3432 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3433 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3434 &txq->descs_phys, GFP_KERNEL); 3435 if (!txq->descs) 3436 return -ENOMEM; 3437 3438 txq->last_desc = txq->size - 1; 3439 3440 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); 3441 if (!txq->buf) 3442 return -ENOMEM; 3443 3444 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 3445 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, 3446 txq->size * TSO_HEADER_SIZE, 3447 &txq->tso_hdrs_phys, GFP_KERNEL); 3448 if (!txq->tso_hdrs) 3449 return -ENOMEM; 3450 3451 /* Setup XPS mapping */ 3452 if (pp->neta_armada3700) 3453 cpu = 0; 3454 else if (txq_number > 1) 3455 cpu = txq->id % num_present_cpus(); 3456 else 3457 cpu = pp->rxq_def % num_present_cpus(); 3458 cpumask_set_cpu(cpu, &txq->affinity_mask); 3459 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 3460 3461 return 0; 3462 } 3463 3464 static void mvneta_txq_hw_init(struct mvneta_port *pp, 3465 struct mvneta_tx_queue *txq) 3466 { 3467 /* Set maximum bandwidth for enabled TXQs */ 3468 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 3469 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 3470 3471 /* Set Tx descriptors queue starting address */ 3472 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 3473 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 3474 3475 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 3476 } 3477 3478 /* Create and initialize a tx queue */ 3479 static int mvneta_txq_init(struct mvneta_port *pp, 3480 struct mvneta_tx_queue *txq) 3481 { 3482 int ret; 3483 3484 ret = mvneta_txq_sw_init(pp, txq); 3485 if (ret < 0) 3486 return ret; 3487 3488 mvneta_txq_hw_init(pp, txq); 3489 3490 return 0; 3491 } 3492 3493 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 3494 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, 3495 struct mvneta_tx_queue *txq) 3496 { 3497 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3498 3499 kfree(txq->buf); 3500 3501 if (txq->tso_hdrs) 3502 dma_free_coherent(pp->dev->dev.parent, 3503 txq->size * TSO_HEADER_SIZE, 3504 txq->tso_hdrs, txq->tso_hdrs_phys); 3505 if (txq->descs) 3506 dma_free_coherent(pp->dev->dev.parent, 3507 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3508 txq->descs, txq->descs_phys); 3509 3510 netdev_tx_reset_queue(nq); 3511 3512 txq->descs = NULL; 3513 txq->last_desc = 0; 3514 txq->next_desc_to_proc = 0; 3515 txq->descs_phys = 0; 3516 } 3517 3518 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, 3519 struct mvneta_tx_queue *txq) 3520 { 3521 /* Set minimum bandwidth for disabled TXQs */ 3522 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 3523 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 3524 3525 /* Set Tx descriptors queue starting address and size */ 3526 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 3527 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 3528 } 3529 3530 static void mvneta_txq_deinit(struct mvneta_port *pp, 3531 struct mvneta_tx_queue *txq) 3532 { 3533 mvneta_txq_sw_deinit(pp, txq); 3534 mvneta_txq_hw_deinit(pp, txq); 3535 } 3536 3537 /* Cleanup all Tx queues */ 3538 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 3539 { 3540 int queue; 3541 3542 for (queue = 0; queue < txq_number; queue++) 3543 mvneta_txq_deinit(pp, &pp->txqs[queue]); 3544 } 3545 3546 /* Cleanup all Rx queues */ 3547 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 3548 { 3549 int queue; 3550 3551 for (queue = 0; queue < rxq_number; queue++) 3552 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3553 } 3554 3555 3556 /* Init all Rx queues */ 3557 static int mvneta_setup_rxqs(struct mvneta_port *pp) 3558 { 3559 int queue; 3560 3561 for (queue = 0; queue < rxq_number; queue++) { 3562 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 3563 3564 if (err) { 3565 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 3566 __func__, queue); 3567 mvneta_cleanup_rxqs(pp); 3568 return err; 3569 } 3570 } 3571 3572 return 0; 3573 } 3574 3575 /* Init all tx queues */ 3576 static int mvneta_setup_txqs(struct mvneta_port *pp) 3577 { 3578 int queue; 3579 3580 for (queue = 0; queue < txq_number; queue++) { 3581 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 3582 if (err) { 3583 netdev_err(pp->dev, "%s: can't create txq=%d\n", 3584 __func__, queue); 3585 mvneta_cleanup_txqs(pp); 3586 return err; 3587 } 3588 } 3589 3590 return 0; 3591 } 3592 3593 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) 3594 { 3595 int ret; 3596 3597 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); 3598 if (ret) 3599 return ret; 3600 3601 return phy_power_on(pp->comphy); 3602 } 3603 3604 static int mvneta_config_interface(struct mvneta_port *pp, 3605 phy_interface_t interface) 3606 { 3607 int ret = 0; 3608 3609 if (pp->comphy) { 3610 if (interface == PHY_INTERFACE_MODE_SGMII || 3611 interface == PHY_INTERFACE_MODE_1000BASEX || 3612 interface == PHY_INTERFACE_MODE_2500BASEX) { 3613 ret = mvneta_comphy_init(pp, interface); 3614 } 3615 } else { 3616 switch (interface) { 3617 case PHY_INTERFACE_MODE_QSGMII: 3618 mvreg_write(pp, MVNETA_SERDES_CFG, 3619 MVNETA_QSGMII_SERDES_PROTO); 3620 break; 3621 3622 case PHY_INTERFACE_MODE_SGMII: 3623 case PHY_INTERFACE_MODE_1000BASEX: 3624 mvreg_write(pp, MVNETA_SERDES_CFG, 3625 MVNETA_SGMII_SERDES_PROTO); 3626 break; 3627 3628 case PHY_INTERFACE_MODE_2500BASEX: 3629 mvreg_write(pp, MVNETA_SERDES_CFG, 3630 MVNETA_HSGMII_SERDES_PROTO); 3631 break; 3632 default: 3633 break; 3634 } 3635 } 3636 3637 pp->phy_interface = interface; 3638 3639 return ret; 3640 } 3641 3642 static void mvneta_start_dev(struct mvneta_port *pp) 3643 { 3644 int cpu; 3645 3646 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); 3647 3648 mvneta_max_rx_size_set(pp, pp->pkt_size); 3649 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 3650 3651 /* start the Rx/Tx activity */ 3652 mvneta_port_enable(pp); 3653 3654 if (!pp->neta_armada3700) { 3655 /* Enable polling on the port */ 3656 for_each_online_cpu(cpu) { 3657 struct mvneta_pcpu_port *port = 3658 per_cpu_ptr(pp->ports, cpu); 3659 3660 napi_enable(&port->napi); 3661 } 3662 } else { 3663 napi_enable(&pp->napi); 3664 } 3665 3666 /* Unmask interrupts. It has to be done from each CPU */ 3667 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3668 3669 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3670 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3671 MVNETA_CAUSE_LINK_CHANGE); 3672 3673 phylink_start(pp->phylink); 3674 3675 /* We may have called phylink_speed_down before */ 3676 phylink_speed_up(pp->phylink); 3677 3678 netif_tx_start_all_queues(pp->dev); 3679 3680 clear_bit(__MVNETA_DOWN, &pp->state); 3681 } 3682 3683 static void mvneta_stop_dev(struct mvneta_port *pp) 3684 { 3685 unsigned int cpu; 3686 3687 set_bit(__MVNETA_DOWN, &pp->state); 3688 3689 if (device_may_wakeup(&pp->dev->dev)) 3690 phylink_speed_down(pp->phylink, false); 3691 3692 phylink_stop(pp->phylink); 3693 3694 if (!pp->neta_armada3700) { 3695 for_each_online_cpu(cpu) { 3696 struct mvneta_pcpu_port *port = 3697 per_cpu_ptr(pp->ports, cpu); 3698 3699 napi_disable(&port->napi); 3700 } 3701 } else { 3702 napi_disable(&pp->napi); 3703 } 3704 3705 netif_carrier_off(pp->dev); 3706 3707 mvneta_port_down(pp); 3708 netif_tx_stop_all_queues(pp->dev); 3709 3710 /* Stop the port activity */ 3711 mvneta_port_disable(pp); 3712 3713 /* Clear all ethernet port interrupts */ 3714 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 3715 3716 /* Mask all ethernet port interrupts */ 3717 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3718 3719 mvneta_tx_reset(pp); 3720 mvneta_rx_reset(pp); 3721 3722 WARN_ON(phy_power_off(pp->comphy)); 3723 } 3724 3725 static void mvneta_percpu_enable(void *arg) 3726 { 3727 struct mvneta_port *pp = arg; 3728 3729 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 3730 } 3731 3732 static void mvneta_percpu_disable(void *arg) 3733 { 3734 struct mvneta_port *pp = arg; 3735 3736 disable_percpu_irq(pp->dev->irq); 3737 } 3738 3739 /* Change the device mtu */ 3740 static int mvneta_change_mtu(struct net_device *dev, int mtu) 3741 { 3742 struct mvneta_port *pp = netdev_priv(dev); 3743 int ret; 3744 3745 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 3746 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 3747 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 3748 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 3749 } 3750 3751 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { 3752 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); 3753 return -EINVAL; 3754 } 3755 3756 dev->mtu = mtu; 3757 3758 if (!netif_running(dev)) { 3759 if (pp->bm_priv) 3760 mvneta_bm_update_mtu(pp, mtu); 3761 3762 netdev_update_features(dev); 3763 return 0; 3764 } 3765 3766 /* The interface is running, so we have to force a 3767 * reallocation of the queues 3768 */ 3769 mvneta_stop_dev(pp); 3770 on_each_cpu(mvneta_percpu_disable, pp, true); 3771 3772 mvneta_cleanup_txqs(pp); 3773 mvneta_cleanup_rxqs(pp); 3774 3775 if (pp->bm_priv) 3776 mvneta_bm_update_mtu(pp, mtu); 3777 3778 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 3779 3780 ret = mvneta_setup_rxqs(pp); 3781 if (ret) { 3782 netdev_err(dev, "unable to setup rxqs after MTU change\n"); 3783 return ret; 3784 } 3785 3786 ret = mvneta_setup_txqs(pp); 3787 if (ret) { 3788 netdev_err(dev, "unable to setup txqs after MTU change\n"); 3789 return ret; 3790 } 3791 3792 on_each_cpu(mvneta_percpu_enable, pp, true); 3793 mvneta_start_dev(pp); 3794 3795 netdev_update_features(dev); 3796 3797 return 0; 3798 } 3799 3800 static netdev_features_t mvneta_fix_features(struct net_device *dev, 3801 netdev_features_t features) 3802 { 3803 struct mvneta_port *pp = netdev_priv(dev); 3804 3805 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 3806 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 3807 netdev_info(dev, 3808 "Disable IP checksum for MTU greater than %dB\n", 3809 pp->tx_csum_limit); 3810 } 3811 3812 return features; 3813 } 3814 3815 /* Get mac address */ 3816 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 3817 { 3818 u32 mac_addr_l, mac_addr_h; 3819 3820 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 3821 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 3822 addr[0] = (mac_addr_h >> 24) & 0xFF; 3823 addr[1] = (mac_addr_h >> 16) & 0xFF; 3824 addr[2] = (mac_addr_h >> 8) & 0xFF; 3825 addr[3] = mac_addr_h & 0xFF; 3826 addr[4] = (mac_addr_l >> 8) & 0xFF; 3827 addr[5] = mac_addr_l & 0xFF; 3828 } 3829 3830 /* Handle setting mac address */ 3831 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 3832 { 3833 struct mvneta_port *pp = netdev_priv(dev); 3834 struct sockaddr *sockaddr = addr; 3835 int ret; 3836 3837 ret = eth_prepare_mac_addr_change(dev, addr); 3838 if (ret < 0) 3839 return ret; 3840 /* Remove previous address table entry */ 3841 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 3842 3843 /* Set new addr in hw */ 3844 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 3845 3846 eth_commit_mac_addr_change(dev, addr); 3847 return 0; 3848 } 3849 3850 static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) 3851 { 3852 return container_of(pcs, struct mvneta_port, phylink_pcs); 3853 } 3854 3855 static int mvneta_pcs_validate(struct phylink_pcs *pcs, 3856 unsigned long *supported, 3857 const struct phylink_link_state *state) 3858 { 3859 /* We only support QSGMII, SGMII, 802.3z and RGMII modes. 3860 * When in 802.3z mode, we must have AN enabled: 3861 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 3862 * When <PortType> = 1 (1000BASE-X) this field must be set to 1." 3863 */ 3864 if (phy_interface_mode_is_8023z(state->interface) && 3865 !phylink_test(state->advertising, Autoneg)) 3866 return -EINVAL; 3867 3868 return 0; 3869 } 3870 3871 static void mvneta_pcs_get_state(struct phylink_pcs *pcs, 3872 struct phylink_link_state *state) 3873 { 3874 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 3875 u32 gmac_stat; 3876 3877 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3878 3879 if (gmac_stat & MVNETA_GMAC_SPEED_1000) 3880 state->speed = 3881 state->interface == PHY_INTERFACE_MODE_2500BASEX ? 3882 SPEED_2500 : SPEED_1000; 3883 else if (gmac_stat & MVNETA_GMAC_SPEED_100) 3884 state->speed = SPEED_100; 3885 else 3886 state->speed = SPEED_10; 3887 3888 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); 3889 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 3890 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 3891 3892 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) 3893 state->pause |= MLO_PAUSE_RX; 3894 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) 3895 state->pause |= MLO_PAUSE_TX; 3896 } 3897 3898 static int mvneta_pcs_config(struct phylink_pcs *pcs, 3899 unsigned int mode, phy_interface_t interface, 3900 const unsigned long *advertising, 3901 bool permit_pause_to_mac) 3902 { 3903 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 3904 u32 mask, val, an, old_an, changed; 3905 3906 mask = MVNETA_GMAC_INBAND_AN_ENABLE | 3907 MVNETA_GMAC_INBAND_RESTART_AN | 3908 MVNETA_GMAC_AN_SPEED_EN | 3909 MVNETA_GMAC_AN_FLOW_CTRL_EN | 3910 MVNETA_GMAC_AN_DUPLEX_EN; 3911 3912 if (phylink_autoneg_inband(mode)) { 3913 mask |= MVNETA_GMAC_CONFIG_MII_SPEED | 3914 MVNETA_GMAC_CONFIG_GMII_SPEED | 3915 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 3916 val = MVNETA_GMAC_INBAND_AN_ENABLE; 3917 3918 if (interface == PHY_INTERFACE_MODE_SGMII) { 3919 /* SGMII mode receives the speed and duplex from PHY */ 3920 val |= MVNETA_GMAC_AN_SPEED_EN | 3921 MVNETA_GMAC_AN_DUPLEX_EN; 3922 } else { 3923 /* 802.3z mode has fixed speed and duplex */ 3924 val |= MVNETA_GMAC_CONFIG_GMII_SPEED | 3925 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 3926 3927 /* The FLOW_CTRL_EN bit selects either the hardware 3928 * automatically or the CONFIG_FLOW_CTRL manually 3929 * controls the GMAC pause mode. 3930 */ 3931 if (permit_pause_to_mac) 3932 val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; 3933 3934 /* Update the advertisement bits */ 3935 mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 3936 if (phylink_test(advertising, Pause)) 3937 val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 3938 } 3939 } else { 3940 /* Phy or fixed speed - disable in-band AN modes */ 3941 val = 0; 3942 } 3943 3944 old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3945 an = (an & ~mask) | val; 3946 changed = old_an ^ an; 3947 if (changed) 3948 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); 3949 3950 /* We are only interested in the advertisement bits changing */ 3951 return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); 3952 } 3953 3954 static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) 3955 { 3956 struct mvneta_port *pp = mvneta_pcs_to_port(pcs); 3957 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3958 3959 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3960 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); 3961 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3962 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); 3963 } 3964 3965 static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { 3966 .pcs_validate = mvneta_pcs_validate, 3967 .pcs_get_state = mvneta_pcs_get_state, 3968 .pcs_config = mvneta_pcs_config, 3969 .pcs_an_restart = mvneta_pcs_an_restart, 3970 }; 3971 3972 static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, 3973 phy_interface_t interface) 3974 { 3975 struct net_device *ndev = to_net_dev(config->dev); 3976 struct mvneta_port *pp = netdev_priv(ndev); 3977 u32 val; 3978 3979 if (pp->phy_interface != interface || 3980 phylink_autoneg_inband(mode)) { 3981 /* Force the link down when changing the interface or if in 3982 * in-band mode. According to Armada 370 documentation, we 3983 * can only change the port mode and in-band enable when the 3984 * link is down. 3985 */ 3986 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3987 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 3988 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 3989 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 3990 } 3991 3992 if (pp->phy_interface != interface) 3993 WARN_ON(phy_power_off(pp->comphy)); 3994 3995 /* Enable the 1ms clock */ 3996 if (phylink_autoneg_inband(mode)) { 3997 unsigned long rate = clk_get_rate(pp->clk); 3998 3999 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, 4000 MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); 4001 } 4002 4003 return 0; 4004 } 4005 4006 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, 4007 const struct phylink_link_state *state) 4008 { 4009 struct net_device *ndev = to_net_dev(config->dev); 4010 struct mvneta_port *pp = netdev_priv(ndev); 4011 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 4012 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 4013 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); 4014 4015 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; 4016 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | 4017 MVNETA_GMAC2_PORT_RESET); 4018 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); 4019 4020 /* Even though it might look weird, when we're configured in 4021 * SGMII or QSGMII mode, the RGMII bit needs to be set. 4022 */ 4023 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; 4024 4025 if (state->interface == PHY_INTERFACE_MODE_QSGMII || 4026 state->interface == PHY_INTERFACE_MODE_SGMII || 4027 phy_interface_mode_is_8023z(state->interface)) 4028 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; 4029 4030 if (!phylink_autoneg_inband(mode)) { 4031 /* Phy or fixed speed - nothing to do, leave the 4032 * configured speed, duplex and flow control as-is. 4033 */ 4034 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 4035 /* SGMII mode receives the state from the PHY */ 4036 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; 4037 } else { 4038 /* 802.3z negotiation - only 1000base-X */ 4039 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; 4040 } 4041 4042 /* When at 2.5G, the link partner can send frames with shortened 4043 * preambles. 4044 */ 4045 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 4046 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 4047 4048 if (new_ctrl0 != gmac_ctrl0) 4049 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); 4050 if (new_ctrl2 != gmac_ctrl2) 4051 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); 4052 if (new_ctrl4 != gmac_ctrl4) 4053 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); 4054 4055 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { 4056 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 4057 MVNETA_GMAC2_PORT_RESET) != 0) 4058 continue; 4059 } 4060 } 4061 4062 static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, 4063 phy_interface_t interface) 4064 { 4065 struct net_device *ndev = to_net_dev(config->dev); 4066 struct mvneta_port *pp = netdev_priv(ndev); 4067 u32 val, clk; 4068 4069 /* Disable 1ms clock if not in in-band mode */ 4070 if (!phylink_autoneg_inband(mode)) { 4071 clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 4072 clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; 4073 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); 4074 } 4075 4076 if (pp->phy_interface != interface) 4077 /* Enable the Serdes PHY */ 4078 WARN_ON(mvneta_config_interface(pp, interface)); 4079 4080 /* Allow the link to come up if in in-band mode, otherwise the 4081 * link is forced via mac_link_down()/mac_link_up() 4082 */ 4083 if (phylink_autoneg_inband(mode)) { 4084 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4085 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; 4086 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4087 } 4088 4089 return 0; 4090 } 4091 4092 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) 4093 { 4094 u32 lpi_ctl1; 4095 4096 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 4097 if (enable) 4098 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; 4099 else 4100 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; 4101 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); 4102 } 4103 4104 static void mvneta_mac_link_down(struct phylink_config *config, 4105 unsigned int mode, phy_interface_t interface) 4106 { 4107 struct net_device *ndev = to_net_dev(config->dev); 4108 struct mvneta_port *pp = netdev_priv(ndev); 4109 u32 val; 4110 4111 mvneta_port_down(pp); 4112 4113 if (!phylink_autoneg_inband(mode)) { 4114 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4115 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4116 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4117 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4118 } 4119 4120 pp->eee_active = false; 4121 mvneta_set_eee(pp, false); 4122 } 4123 4124 static void mvneta_mac_link_up(struct phylink_config *config, 4125 struct phy_device *phy, 4126 unsigned int mode, phy_interface_t interface, 4127 int speed, int duplex, 4128 bool tx_pause, bool rx_pause) 4129 { 4130 struct net_device *ndev = to_net_dev(config->dev); 4131 struct mvneta_port *pp = netdev_priv(ndev); 4132 u32 val; 4133 4134 if (!phylink_autoneg_inband(mode)) { 4135 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4136 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | 4137 MVNETA_GMAC_CONFIG_MII_SPEED | 4138 MVNETA_GMAC_CONFIG_GMII_SPEED | 4139 MVNETA_GMAC_CONFIG_FLOW_CTRL | 4140 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 4141 val |= MVNETA_GMAC_FORCE_LINK_PASS; 4142 4143 if (speed == SPEED_1000 || speed == SPEED_2500) 4144 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 4145 else if (speed == SPEED_100) 4146 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 4147 4148 if (duplex == DUPLEX_FULL) 4149 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4150 4151 if (tx_pause || rx_pause) 4152 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4153 4154 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4155 } else { 4156 /* When inband doesn't cover flow control or flow control is 4157 * disabled, we need to manually configure it. This bit will 4158 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 4159 */ 4160 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4161 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; 4162 4163 if (tx_pause || rx_pause) 4164 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4165 4166 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4167 } 4168 4169 mvneta_port_up(pp); 4170 4171 if (phy && pp->eee_enabled) { 4172 pp->eee_active = phy_init_eee(phy, 0) >= 0; 4173 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); 4174 } 4175 } 4176 4177 static const struct phylink_mac_ops mvneta_phylink_ops = { 4178 .validate = phylink_generic_validate, 4179 .mac_prepare = mvneta_mac_prepare, 4180 .mac_config = mvneta_mac_config, 4181 .mac_finish = mvneta_mac_finish, 4182 .mac_link_down = mvneta_mac_link_down, 4183 .mac_link_up = mvneta_mac_link_up, 4184 }; 4185 4186 static int mvneta_mdio_probe(struct mvneta_port *pp) 4187 { 4188 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 4189 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); 4190 4191 if (err) 4192 netdev_err(pp->dev, "could not attach PHY: %d\n", err); 4193 4194 phylink_ethtool_get_wol(pp->phylink, &wol); 4195 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); 4196 4197 /* PHY WoL may be enabled but device wakeup disabled */ 4198 if (wol.supported) 4199 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); 4200 4201 return err; 4202 } 4203 4204 static void mvneta_mdio_remove(struct mvneta_port *pp) 4205 { 4206 phylink_disconnect_phy(pp->phylink); 4207 } 4208 4209 /* Electing a CPU must be done in an atomic way: it should be done 4210 * after or before the removal/insertion of a CPU and this function is 4211 * not reentrant. 4212 */ 4213 static void mvneta_percpu_elect(struct mvneta_port *pp) 4214 { 4215 int elected_cpu = 0, max_cpu, cpu, i = 0; 4216 4217 /* Use the cpu associated to the rxq when it is online, in all 4218 * the other cases, use the cpu 0 which can't be offline. 4219 */ 4220 if (cpu_online(pp->rxq_def)) 4221 elected_cpu = pp->rxq_def; 4222 4223 max_cpu = num_present_cpus(); 4224 4225 for_each_online_cpu(cpu) { 4226 int rxq_map = 0, txq_map = 0; 4227 int rxq; 4228 4229 for (rxq = 0; rxq < rxq_number; rxq++) 4230 if ((rxq % max_cpu) == cpu) 4231 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 4232 4233 if (cpu == elected_cpu) 4234 /* Map the default receive queue to the elected CPU */ 4235 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 4236 4237 /* We update the TX queue map only if we have one 4238 * queue. In this case we associate the TX queue to 4239 * the CPU bound to the default RX queue 4240 */ 4241 if (txq_number == 1) 4242 txq_map = (cpu == elected_cpu) ? 4243 MVNETA_CPU_TXQ_ACCESS(1) : 0; 4244 else 4245 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 4246 MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 4247 4248 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 4249 4250 /* Update the interrupt mask on each CPU according the 4251 * new mapping 4252 */ 4253 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 4254 pp, true); 4255 i++; 4256 4257 } 4258 }; 4259 4260 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) 4261 { 4262 int other_cpu; 4263 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4264 node_online); 4265 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4266 4267 /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts 4268 * are routed to CPU 0, so we don't need all the cpu-hotplug support 4269 */ 4270 if (pp->neta_armada3700) 4271 return 0; 4272 4273 spin_lock(&pp->lock); 4274 /* 4275 * Configuring the driver for a new CPU while the driver is 4276 * stopping is racy, so just avoid it. 4277 */ 4278 if (pp->is_stopped) { 4279 spin_unlock(&pp->lock); 4280 return 0; 4281 } 4282 netif_tx_stop_all_queues(pp->dev); 4283 4284 /* 4285 * We have to synchronise on tha napi of each CPU except the one 4286 * just being woken up 4287 */ 4288 for_each_online_cpu(other_cpu) { 4289 if (other_cpu != cpu) { 4290 struct mvneta_pcpu_port *other_port = 4291 per_cpu_ptr(pp->ports, other_cpu); 4292 4293 napi_synchronize(&other_port->napi); 4294 } 4295 } 4296 4297 /* Mask all ethernet port interrupts */ 4298 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4299 napi_enable(&port->napi); 4300 4301 /* 4302 * Enable per-CPU interrupts on the CPU that is 4303 * brought up. 4304 */ 4305 mvneta_percpu_enable(pp); 4306 4307 /* 4308 * Enable per-CPU interrupt on the one CPU we care 4309 * about. 4310 */ 4311 mvneta_percpu_elect(pp); 4312 4313 /* Unmask all ethernet port interrupts */ 4314 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4315 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4316 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4317 MVNETA_CAUSE_LINK_CHANGE); 4318 netif_tx_start_all_queues(pp->dev); 4319 spin_unlock(&pp->lock); 4320 return 0; 4321 } 4322 4323 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) 4324 { 4325 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4326 node_online); 4327 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4328 4329 /* 4330 * Thanks to this lock we are sure that any pending cpu election is 4331 * done. 4332 */ 4333 spin_lock(&pp->lock); 4334 /* Mask all ethernet port interrupts */ 4335 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4336 spin_unlock(&pp->lock); 4337 4338 napi_synchronize(&port->napi); 4339 napi_disable(&port->napi); 4340 /* Disable per-CPU interrupts on the CPU that is brought down. */ 4341 mvneta_percpu_disable(pp); 4342 return 0; 4343 } 4344 4345 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) 4346 { 4347 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4348 node_dead); 4349 4350 /* Check if a new CPU must be elected now this on is down */ 4351 spin_lock(&pp->lock); 4352 mvneta_percpu_elect(pp); 4353 spin_unlock(&pp->lock); 4354 /* Unmask all ethernet port interrupts */ 4355 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4356 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4357 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4358 MVNETA_CAUSE_LINK_CHANGE); 4359 netif_tx_start_all_queues(pp->dev); 4360 return 0; 4361 } 4362 4363 static int mvneta_open(struct net_device *dev) 4364 { 4365 struct mvneta_port *pp = netdev_priv(dev); 4366 int ret; 4367 4368 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 4369 4370 ret = mvneta_setup_rxqs(pp); 4371 if (ret) 4372 return ret; 4373 4374 ret = mvneta_setup_txqs(pp); 4375 if (ret) 4376 goto err_cleanup_rxqs; 4377 4378 /* Connect to port interrupt line */ 4379 if (pp->neta_armada3700) 4380 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 4381 dev->name, pp); 4382 else 4383 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, 4384 dev->name, pp->ports); 4385 if (ret) { 4386 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 4387 goto err_cleanup_txqs; 4388 } 4389 4390 if (!pp->neta_armada3700) { 4391 /* Enable per-CPU interrupt on all the CPU to handle our RX 4392 * queue interrupts 4393 */ 4394 on_each_cpu(mvneta_percpu_enable, pp, true); 4395 4396 pp->is_stopped = false; 4397 /* Register a CPU notifier to handle the case where our CPU 4398 * might be taken offline. 4399 */ 4400 ret = cpuhp_state_add_instance_nocalls(online_hpstate, 4401 &pp->node_online); 4402 if (ret) 4403 goto err_free_irq; 4404 4405 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4406 &pp->node_dead); 4407 if (ret) 4408 goto err_free_online_hp; 4409 } 4410 4411 ret = mvneta_mdio_probe(pp); 4412 if (ret < 0) { 4413 netdev_err(dev, "cannot probe MDIO bus\n"); 4414 goto err_free_dead_hp; 4415 } 4416 4417 mvneta_start_dev(pp); 4418 4419 return 0; 4420 4421 err_free_dead_hp: 4422 if (!pp->neta_armada3700) 4423 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4424 &pp->node_dead); 4425 err_free_online_hp: 4426 if (!pp->neta_armada3700) 4427 cpuhp_state_remove_instance_nocalls(online_hpstate, 4428 &pp->node_online); 4429 err_free_irq: 4430 if (pp->neta_armada3700) { 4431 free_irq(pp->dev->irq, pp); 4432 } else { 4433 on_each_cpu(mvneta_percpu_disable, pp, true); 4434 free_percpu_irq(pp->dev->irq, pp->ports); 4435 } 4436 err_cleanup_txqs: 4437 mvneta_cleanup_txqs(pp); 4438 err_cleanup_rxqs: 4439 mvneta_cleanup_rxqs(pp); 4440 return ret; 4441 } 4442 4443 /* Stop the port, free port interrupt line */ 4444 static int mvneta_stop(struct net_device *dev) 4445 { 4446 struct mvneta_port *pp = netdev_priv(dev); 4447 4448 if (!pp->neta_armada3700) { 4449 /* Inform that we are stopping so we don't want to setup the 4450 * driver for new CPUs in the notifiers. The code of the 4451 * notifier for CPU online is protected by the same spinlock, 4452 * so when we get the lock, the notifer work is done. 4453 */ 4454 spin_lock(&pp->lock); 4455 pp->is_stopped = true; 4456 spin_unlock(&pp->lock); 4457 4458 mvneta_stop_dev(pp); 4459 mvneta_mdio_remove(pp); 4460 4461 cpuhp_state_remove_instance_nocalls(online_hpstate, 4462 &pp->node_online); 4463 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4464 &pp->node_dead); 4465 on_each_cpu(mvneta_percpu_disable, pp, true); 4466 free_percpu_irq(dev->irq, pp->ports); 4467 } else { 4468 mvneta_stop_dev(pp); 4469 mvneta_mdio_remove(pp); 4470 free_irq(dev->irq, pp); 4471 } 4472 4473 mvneta_cleanup_rxqs(pp); 4474 mvneta_cleanup_txqs(pp); 4475 4476 return 0; 4477 } 4478 4479 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4480 { 4481 struct mvneta_port *pp = netdev_priv(dev); 4482 4483 return phylink_mii_ioctl(pp->phylink, ifr, cmd); 4484 } 4485 4486 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, 4487 struct netlink_ext_ack *extack) 4488 { 4489 bool need_update, running = netif_running(dev); 4490 struct mvneta_port *pp = netdev_priv(dev); 4491 struct bpf_prog *old_prog; 4492 4493 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4494 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); 4495 return -EOPNOTSUPP; 4496 } 4497 4498 if (pp->bm_priv) { 4499 NL_SET_ERR_MSG_MOD(extack, 4500 "Hardware Buffer Management not supported on XDP"); 4501 return -EOPNOTSUPP; 4502 } 4503 4504 need_update = !!pp->xdp_prog != !!prog; 4505 if (running && need_update) 4506 mvneta_stop(dev); 4507 4508 old_prog = xchg(&pp->xdp_prog, prog); 4509 if (old_prog) 4510 bpf_prog_put(old_prog); 4511 4512 if (running && need_update) 4513 return mvneta_open(dev); 4514 4515 return 0; 4516 } 4517 4518 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) 4519 { 4520 switch (xdp->command) { 4521 case XDP_SETUP_PROG: 4522 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); 4523 default: 4524 return -EINVAL; 4525 } 4526 } 4527 4528 /* Ethtool methods */ 4529 4530 /* Set link ksettings (phy address, speed) for ethtools */ 4531 static int 4532 mvneta_ethtool_set_link_ksettings(struct net_device *ndev, 4533 const struct ethtool_link_ksettings *cmd) 4534 { 4535 struct mvneta_port *pp = netdev_priv(ndev); 4536 4537 return phylink_ethtool_ksettings_set(pp->phylink, cmd); 4538 } 4539 4540 /* Get link ksettings for ethtools */ 4541 static int 4542 mvneta_ethtool_get_link_ksettings(struct net_device *ndev, 4543 struct ethtool_link_ksettings *cmd) 4544 { 4545 struct mvneta_port *pp = netdev_priv(ndev); 4546 4547 return phylink_ethtool_ksettings_get(pp->phylink, cmd); 4548 } 4549 4550 static int mvneta_ethtool_nway_reset(struct net_device *dev) 4551 { 4552 struct mvneta_port *pp = netdev_priv(dev); 4553 4554 return phylink_ethtool_nway_reset(pp->phylink); 4555 } 4556 4557 /* Set interrupt coalescing for ethtools */ 4558 static int 4559 mvneta_ethtool_set_coalesce(struct net_device *dev, 4560 struct ethtool_coalesce *c, 4561 struct kernel_ethtool_coalesce *kernel_coal, 4562 struct netlink_ext_ack *extack) 4563 { 4564 struct mvneta_port *pp = netdev_priv(dev); 4565 int queue; 4566 4567 for (queue = 0; queue < rxq_number; queue++) { 4568 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4569 rxq->time_coal = c->rx_coalesce_usecs; 4570 rxq->pkts_coal = c->rx_max_coalesced_frames; 4571 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 4572 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 4573 } 4574 4575 for (queue = 0; queue < txq_number; queue++) { 4576 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 4577 txq->done_pkts_coal = c->tx_max_coalesced_frames; 4578 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 4579 } 4580 4581 return 0; 4582 } 4583 4584 /* get coalescing for ethtools */ 4585 static int 4586 mvneta_ethtool_get_coalesce(struct net_device *dev, 4587 struct ethtool_coalesce *c, 4588 struct kernel_ethtool_coalesce *kernel_coal, 4589 struct netlink_ext_ack *extack) 4590 { 4591 struct mvneta_port *pp = netdev_priv(dev); 4592 4593 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 4594 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 4595 4596 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 4597 return 0; 4598 } 4599 4600 4601 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 4602 struct ethtool_drvinfo *drvinfo) 4603 { 4604 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 4605 sizeof(drvinfo->driver)); 4606 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 4607 sizeof(drvinfo->version)); 4608 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 4609 sizeof(drvinfo->bus_info)); 4610 } 4611 4612 4613 static void 4614 mvneta_ethtool_get_ringparam(struct net_device *netdev, 4615 struct ethtool_ringparam *ring, 4616 struct kernel_ethtool_ringparam *kernel_ring, 4617 struct netlink_ext_ack *extack) 4618 { 4619 struct mvneta_port *pp = netdev_priv(netdev); 4620 4621 ring->rx_max_pending = MVNETA_MAX_RXD; 4622 ring->tx_max_pending = MVNETA_MAX_TXD; 4623 ring->rx_pending = pp->rx_ring_size; 4624 ring->tx_pending = pp->tx_ring_size; 4625 } 4626 4627 static int 4628 mvneta_ethtool_set_ringparam(struct net_device *dev, 4629 struct ethtool_ringparam *ring, 4630 struct kernel_ethtool_ringparam *kernel_ring, 4631 struct netlink_ext_ack *extack) 4632 { 4633 struct mvneta_port *pp = netdev_priv(dev); 4634 4635 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 4636 return -EINVAL; 4637 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 4638 ring->rx_pending : MVNETA_MAX_RXD; 4639 4640 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 4641 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 4642 if (pp->tx_ring_size != ring->tx_pending) 4643 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 4644 pp->tx_ring_size, ring->tx_pending); 4645 4646 if (netif_running(dev)) { 4647 mvneta_stop(dev); 4648 if (mvneta_open(dev)) { 4649 netdev_err(dev, 4650 "error on opening device after ring param change\n"); 4651 return -ENOMEM; 4652 } 4653 } 4654 4655 return 0; 4656 } 4657 4658 static void mvneta_ethtool_get_pauseparam(struct net_device *dev, 4659 struct ethtool_pauseparam *pause) 4660 { 4661 struct mvneta_port *pp = netdev_priv(dev); 4662 4663 phylink_ethtool_get_pauseparam(pp->phylink, pause); 4664 } 4665 4666 static int mvneta_ethtool_set_pauseparam(struct net_device *dev, 4667 struct ethtool_pauseparam *pause) 4668 { 4669 struct mvneta_port *pp = netdev_priv(dev); 4670 4671 return phylink_ethtool_set_pauseparam(pp->phylink, pause); 4672 } 4673 4674 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 4675 u8 *data) 4676 { 4677 if (sset == ETH_SS_STATS) { 4678 int i; 4679 4680 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4681 memcpy(data + i * ETH_GSTRING_LEN, 4682 mvneta_statistics[i].name, ETH_GSTRING_LEN); 4683 } 4684 } 4685 4686 static void 4687 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, 4688 struct mvneta_ethtool_stats *es) 4689 { 4690 unsigned int start; 4691 int cpu; 4692 4693 for_each_possible_cpu(cpu) { 4694 struct mvneta_pcpu_stats *stats; 4695 u64 skb_alloc_error; 4696 u64 refill_error; 4697 u64 xdp_redirect; 4698 u64 xdp_xmit_err; 4699 u64 xdp_tx_err; 4700 u64 xdp_pass; 4701 u64 xdp_drop; 4702 u64 xdp_xmit; 4703 u64 xdp_tx; 4704 4705 stats = per_cpu_ptr(pp->stats, cpu); 4706 do { 4707 start = u64_stats_fetch_begin_irq(&stats->syncp); 4708 skb_alloc_error = stats->es.skb_alloc_error; 4709 refill_error = stats->es.refill_error; 4710 xdp_redirect = stats->es.ps.xdp_redirect; 4711 xdp_pass = stats->es.ps.xdp_pass; 4712 xdp_drop = stats->es.ps.xdp_drop; 4713 xdp_xmit = stats->es.ps.xdp_xmit; 4714 xdp_xmit_err = stats->es.ps.xdp_xmit_err; 4715 xdp_tx = stats->es.ps.xdp_tx; 4716 xdp_tx_err = stats->es.ps.xdp_tx_err; 4717 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 4718 4719 es->skb_alloc_error += skb_alloc_error; 4720 es->refill_error += refill_error; 4721 es->ps.xdp_redirect += xdp_redirect; 4722 es->ps.xdp_pass += xdp_pass; 4723 es->ps.xdp_drop += xdp_drop; 4724 es->ps.xdp_xmit += xdp_xmit; 4725 es->ps.xdp_xmit_err += xdp_xmit_err; 4726 es->ps.xdp_tx += xdp_tx; 4727 es->ps.xdp_tx_err += xdp_tx_err; 4728 } 4729 } 4730 4731 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 4732 { 4733 struct mvneta_ethtool_stats stats = {}; 4734 const struct mvneta_statistic *s; 4735 void __iomem *base = pp->base; 4736 u32 high, low; 4737 u64 val; 4738 int i; 4739 4740 mvneta_ethtool_update_pcpu_stats(pp, &stats); 4741 for (i = 0, s = mvneta_statistics; 4742 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 4743 s++, i++) { 4744 switch (s->type) { 4745 case T_REG_32: 4746 val = readl_relaxed(base + s->offset); 4747 pp->ethtool_stats[i] += val; 4748 break; 4749 case T_REG_64: 4750 /* Docs say to read low 32-bit then high */ 4751 low = readl_relaxed(base + s->offset); 4752 high = readl_relaxed(base + s->offset + 4); 4753 val = (u64)high << 32 | low; 4754 pp->ethtool_stats[i] += val; 4755 break; 4756 case T_SW: 4757 switch (s->offset) { 4758 case ETHTOOL_STAT_EEE_WAKEUP: 4759 val = phylink_get_eee_err(pp->phylink); 4760 pp->ethtool_stats[i] += val; 4761 break; 4762 case ETHTOOL_STAT_SKB_ALLOC_ERR: 4763 pp->ethtool_stats[i] = stats.skb_alloc_error; 4764 break; 4765 case ETHTOOL_STAT_REFILL_ERR: 4766 pp->ethtool_stats[i] = stats.refill_error; 4767 break; 4768 case ETHTOOL_XDP_REDIRECT: 4769 pp->ethtool_stats[i] = stats.ps.xdp_redirect; 4770 break; 4771 case ETHTOOL_XDP_PASS: 4772 pp->ethtool_stats[i] = stats.ps.xdp_pass; 4773 break; 4774 case ETHTOOL_XDP_DROP: 4775 pp->ethtool_stats[i] = stats.ps.xdp_drop; 4776 break; 4777 case ETHTOOL_XDP_TX: 4778 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4779 break; 4780 case ETHTOOL_XDP_TX_ERR: 4781 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 4782 break; 4783 case ETHTOOL_XDP_XMIT: 4784 pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4785 break; 4786 case ETHTOOL_XDP_XMIT_ERR: 4787 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 4788 break; 4789 } 4790 break; 4791 } 4792 } 4793 } 4794 4795 static void mvneta_ethtool_get_stats(struct net_device *dev, 4796 struct ethtool_stats *stats, u64 *data) 4797 { 4798 struct mvneta_port *pp = netdev_priv(dev); 4799 int i; 4800 4801 mvneta_ethtool_update_stats(pp); 4802 4803 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4804 *data++ = pp->ethtool_stats[i]; 4805 } 4806 4807 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) 4808 { 4809 if (sset == ETH_SS_STATS) 4810 return ARRAY_SIZE(mvneta_statistics); 4811 return -EOPNOTSUPP; 4812 } 4813 4814 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) 4815 { 4816 return MVNETA_RSS_LU_TABLE_SIZE; 4817 } 4818 4819 static int mvneta_ethtool_get_rxnfc(struct net_device *dev, 4820 struct ethtool_rxnfc *info, 4821 u32 *rules __always_unused) 4822 { 4823 switch (info->cmd) { 4824 case ETHTOOL_GRXRINGS: 4825 info->data = rxq_number; 4826 return 0; 4827 case ETHTOOL_GRXFH: 4828 return -EOPNOTSUPP; 4829 default: 4830 return -EOPNOTSUPP; 4831 } 4832 } 4833 4834 static int mvneta_config_rss(struct mvneta_port *pp) 4835 { 4836 int cpu; 4837 u32 val; 4838 4839 netif_tx_stop_all_queues(pp->dev); 4840 4841 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4842 4843 if (!pp->neta_armada3700) { 4844 /* We have to synchronise on the napi of each CPU */ 4845 for_each_online_cpu(cpu) { 4846 struct mvneta_pcpu_port *pcpu_port = 4847 per_cpu_ptr(pp->ports, cpu); 4848 4849 napi_synchronize(&pcpu_port->napi); 4850 napi_disable(&pcpu_port->napi); 4851 } 4852 } else { 4853 napi_synchronize(&pp->napi); 4854 napi_disable(&pp->napi); 4855 } 4856 4857 pp->rxq_def = pp->indir[0]; 4858 4859 /* Update unicast mapping */ 4860 mvneta_set_rx_mode(pp->dev); 4861 4862 /* Update val of portCfg register accordingly with all RxQueue types */ 4863 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 4864 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 4865 4866 /* Update the elected CPU matching the new rxq_def */ 4867 spin_lock(&pp->lock); 4868 mvneta_percpu_elect(pp); 4869 spin_unlock(&pp->lock); 4870 4871 if (!pp->neta_armada3700) { 4872 /* We have to synchronise on the napi of each CPU */ 4873 for_each_online_cpu(cpu) { 4874 struct mvneta_pcpu_port *pcpu_port = 4875 per_cpu_ptr(pp->ports, cpu); 4876 4877 napi_enable(&pcpu_port->napi); 4878 } 4879 } else { 4880 napi_enable(&pp->napi); 4881 } 4882 4883 netif_tx_start_all_queues(pp->dev); 4884 4885 return 0; 4886 } 4887 4888 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 4889 const u8 *key, const u8 hfunc) 4890 { 4891 struct mvneta_port *pp = netdev_priv(dev); 4892 4893 /* Current code for Armada 3700 doesn't support RSS features yet */ 4894 if (pp->neta_armada3700) 4895 return -EOPNOTSUPP; 4896 4897 /* We require at least one supported parameter to be changed 4898 * and no change in any of the unsupported parameters 4899 */ 4900 if (key || 4901 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 4902 return -EOPNOTSUPP; 4903 4904 if (!indir) 4905 return 0; 4906 4907 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); 4908 4909 return mvneta_config_rss(pp); 4910 } 4911 4912 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 4913 u8 *hfunc) 4914 { 4915 struct mvneta_port *pp = netdev_priv(dev); 4916 4917 /* Current code for Armada 3700 doesn't support RSS features yet */ 4918 if (pp->neta_armada3700) 4919 return -EOPNOTSUPP; 4920 4921 if (hfunc) 4922 *hfunc = ETH_RSS_HASH_TOP; 4923 4924 if (!indir) 4925 return 0; 4926 4927 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); 4928 4929 return 0; 4930 } 4931 4932 static void mvneta_ethtool_get_wol(struct net_device *dev, 4933 struct ethtool_wolinfo *wol) 4934 { 4935 struct mvneta_port *pp = netdev_priv(dev); 4936 4937 phylink_ethtool_get_wol(pp->phylink, wol); 4938 } 4939 4940 static int mvneta_ethtool_set_wol(struct net_device *dev, 4941 struct ethtool_wolinfo *wol) 4942 { 4943 struct mvneta_port *pp = netdev_priv(dev); 4944 int ret; 4945 4946 ret = phylink_ethtool_set_wol(pp->phylink, wol); 4947 if (!ret) 4948 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); 4949 4950 return ret; 4951 } 4952 4953 static int mvneta_ethtool_get_eee(struct net_device *dev, 4954 struct ethtool_eee *eee) 4955 { 4956 struct mvneta_port *pp = netdev_priv(dev); 4957 u32 lpi_ctl0; 4958 4959 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4960 4961 eee->eee_enabled = pp->eee_enabled; 4962 eee->eee_active = pp->eee_active; 4963 eee->tx_lpi_enabled = pp->tx_lpi_enabled; 4964 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; 4965 4966 return phylink_ethtool_get_eee(pp->phylink, eee); 4967 } 4968 4969 static int mvneta_ethtool_set_eee(struct net_device *dev, 4970 struct ethtool_eee *eee) 4971 { 4972 struct mvneta_port *pp = netdev_priv(dev); 4973 u32 lpi_ctl0; 4974 4975 /* The Armada 37x documents do not give limits for this other than 4976 * it being an 8-bit register. 4977 */ 4978 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) 4979 return -EINVAL; 4980 4981 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4982 lpi_ctl0 &= ~(0xff << 8); 4983 lpi_ctl0 |= eee->tx_lpi_timer << 8; 4984 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); 4985 4986 pp->eee_enabled = eee->eee_enabled; 4987 pp->tx_lpi_enabled = eee->tx_lpi_enabled; 4988 4989 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); 4990 4991 return phylink_ethtool_set_eee(pp->phylink, eee); 4992 } 4993 4994 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) 4995 { 4996 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); 4997 } 4998 4999 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) 5000 { 5001 u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); 5002 5003 val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); 5004 val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); 5005 5006 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); 5007 } 5008 5009 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) 5010 { 5011 unsigned long core_clk_rate; 5012 u32 refill_cycles; 5013 u32 val; 5014 5015 core_clk_rate = clk_get_rate(pp->clk); 5016 if (!core_clk_rate) 5017 return -EINVAL; 5018 5019 refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / 5020 (NSEC_PER_SEC / core_clk_rate); 5021 5022 if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) 5023 return -EINVAL; 5024 5025 /* Enable bw limit algorithm version 3 */ 5026 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 5027 val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 5028 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 5029 5030 /* Set the base refill rate */ 5031 mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); 5032 5033 return 0; 5034 } 5035 5036 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) 5037 { 5038 u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); 5039 5040 val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); 5041 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); 5042 } 5043 5044 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, 5045 u64 min_rate, u64 max_rate) 5046 { 5047 u32 refill_val, rem; 5048 u32 val = 0; 5049 5050 /* Convert to from Bps to bps */ 5051 max_rate *= 8; 5052 5053 if (min_rate) 5054 return -EINVAL; 5055 5056 refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, 5057 &rem); 5058 5059 if (rem || !refill_val || 5060 refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) 5061 return -EINVAL; 5062 5063 val = refill_val; 5064 val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << 5065 MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); 5066 5067 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); 5068 5069 return 0; 5070 } 5071 5072 static int mvneta_setup_mqprio(struct net_device *dev, 5073 struct tc_mqprio_qopt_offload *mqprio) 5074 { 5075 struct mvneta_port *pp = netdev_priv(dev); 5076 int rxq, txq, tc, ret; 5077 u8 num_tc; 5078 5079 if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) 5080 return 0; 5081 5082 num_tc = mqprio->qopt.num_tc; 5083 5084 if (num_tc > rxq_number) 5085 return -EINVAL; 5086 5087 mvneta_clear_rx_prio_map(pp); 5088 5089 if (!num_tc) { 5090 mvneta_disable_per_queue_rate_limit(pp); 5091 netdev_reset_tc(dev); 5092 return 0; 5093 } 5094 5095 netdev_set_num_tc(dev, mqprio->qopt.num_tc); 5096 5097 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 5098 netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], 5099 mqprio->qopt.offset[tc]); 5100 5101 for (rxq = mqprio->qopt.offset[tc]; 5102 rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 5103 rxq++) { 5104 if (rxq >= rxq_number) 5105 return -EINVAL; 5106 5107 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); 5108 } 5109 } 5110 5111 if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { 5112 mvneta_disable_per_queue_rate_limit(pp); 5113 return 0; 5114 } 5115 5116 if (mqprio->qopt.num_tc > txq_number) 5117 return -EINVAL; 5118 5119 ret = mvneta_enable_per_queue_rate_limit(pp); 5120 if (ret) 5121 return ret; 5122 5123 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { 5124 for (txq = mqprio->qopt.offset[tc]; 5125 txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; 5126 txq++) { 5127 if (txq >= txq_number) 5128 return -EINVAL; 5129 5130 ret = mvneta_setup_queue_rates(pp, txq, 5131 mqprio->min_rate[tc], 5132 mqprio->max_rate[tc]); 5133 if (ret) 5134 return ret; 5135 } 5136 } 5137 5138 return 0; 5139 } 5140 5141 static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, 5142 void *type_data) 5143 { 5144 switch (type) { 5145 case TC_SETUP_QDISC_MQPRIO: 5146 return mvneta_setup_mqprio(dev, type_data); 5147 default: 5148 return -EOPNOTSUPP; 5149 } 5150 } 5151 5152 static const struct net_device_ops mvneta_netdev_ops = { 5153 .ndo_open = mvneta_open, 5154 .ndo_stop = mvneta_stop, 5155 .ndo_start_xmit = mvneta_tx, 5156 .ndo_set_rx_mode = mvneta_set_rx_mode, 5157 .ndo_set_mac_address = mvneta_set_mac_addr, 5158 .ndo_change_mtu = mvneta_change_mtu, 5159 .ndo_fix_features = mvneta_fix_features, 5160 .ndo_get_stats64 = mvneta_get_stats64, 5161 .ndo_eth_ioctl = mvneta_ioctl, 5162 .ndo_bpf = mvneta_xdp, 5163 .ndo_xdp_xmit = mvneta_xdp_xmit, 5164 .ndo_setup_tc = mvneta_setup_tc, 5165 }; 5166 5167 static const struct ethtool_ops mvneta_eth_tool_ops = { 5168 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 5169 ETHTOOL_COALESCE_MAX_FRAMES, 5170 .nway_reset = mvneta_ethtool_nway_reset, 5171 .get_link = ethtool_op_get_link, 5172 .set_coalesce = mvneta_ethtool_set_coalesce, 5173 .get_coalesce = mvneta_ethtool_get_coalesce, 5174 .get_drvinfo = mvneta_ethtool_get_drvinfo, 5175 .get_ringparam = mvneta_ethtool_get_ringparam, 5176 .set_ringparam = mvneta_ethtool_set_ringparam, 5177 .get_pauseparam = mvneta_ethtool_get_pauseparam, 5178 .set_pauseparam = mvneta_ethtool_set_pauseparam, 5179 .get_strings = mvneta_ethtool_get_strings, 5180 .get_ethtool_stats = mvneta_ethtool_get_stats, 5181 .get_sset_count = mvneta_ethtool_get_sset_count, 5182 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, 5183 .get_rxnfc = mvneta_ethtool_get_rxnfc, 5184 .get_rxfh = mvneta_ethtool_get_rxfh, 5185 .set_rxfh = mvneta_ethtool_set_rxfh, 5186 .get_link_ksettings = mvneta_ethtool_get_link_ksettings, 5187 .set_link_ksettings = mvneta_ethtool_set_link_ksettings, 5188 .get_wol = mvneta_ethtool_get_wol, 5189 .set_wol = mvneta_ethtool_set_wol, 5190 .get_eee = mvneta_ethtool_get_eee, 5191 .set_eee = mvneta_ethtool_set_eee, 5192 }; 5193 5194 /* Initialize hw */ 5195 static int mvneta_init(struct device *dev, struct mvneta_port *pp) 5196 { 5197 int queue; 5198 5199 /* Disable port */ 5200 mvneta_port_disable(pp); 5201 5202 /* Set port default values */ 5203 mvneta_defaults_set(pp); 5204 5205 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); 5206 if (!pp->txqs) 5207 return -ENOMEM; 5208 5209 /* Initialize TX descriptor rings */ 5210 for (queue = 0; queue < txq_number; queue++) { 5211 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5212 txq->id = queue; 5213 txq->size = pp->tx_ring_size; 5214 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 5215 } 5216 5217 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); 5218 if (!pp->rxqs) 5219 return -ENOMEM; 5220 5221 /* Create Rx descriptor rings */ 5222 for (queue = 0; queue < rxq_number; queue++) { 5223 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5224 rxq->id = queue; 5225 rxq->size = pp->rx_ring_size; 5226 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 5227 rxq->time_coal = MVNETA_RX_COAL_USEC; 5228 rxq->buf_virt_addr 5229 = devm_kmalloc_array(pp->dev->dev.parent, 5230 rxq->size, 5231 sizeof(*rxq->buf_virt_addr), 5232 GFP_KERNEL); 5233 if (!rxq->buf_virt_addr) 5234 return -ENOMEM; 5235 } 5236 5237 return 0; 5238 } 5239 5240 /* platform glue : initialize decoding windows */ 5241 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 5242 const struct mbus_dram_target_info *dram) 5243 { 5244 u32 win_enable; 5245 u32 win_protect; 5246 int i; 5247 5248 for (i = 0; i < 6; i++) { 5249 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 5250 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 5251 5252 if (i < 4) 5253 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 5254 } 5255 5256 win_enable = 0x3f; 5257 win_protect = 0; 5258 5259 if (dram) { 5260 for (i = 0; i < dram->num_cs; i++) { 5261 const struct mbus_dram_window *cs = dram->cs + i; 5262 5263 mvreg_write(pp, MVNETA_WIN_BASE(i), 5264 (cs->base & 0xffff0000) | 5265 (cs->mbus_attr << 8) | 5266 dram->mbus_dram_target_id); 5267 5268 mvreg_write(pp, MVNETA_WIN_SIZE(i), 5269 (cs->size - 1) & 0xffff0000); 5270 5271 win_enable &= ~(1 << i); 5272 win_protect |= 3 << (2 * i); 5273 } 5274 } else { 5275 /* For Armada3700 open default 4GB Mbus window, leaving 5276 * arbitration of target/attribute to a different layer 5277 * of configuration. 5278 */ 5279 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); 5280 win_enable &= ~BIT(0); 5281 win_protect = 3; 5282 } 5283 5284 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 5285 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 5286 } 5287 5288 /* Power up the port */ 5289 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 5290 { 5291 /* MAC Cause register should be cleared */ 5292 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 5293 5294 if (phy_mode != PHY_INTERFACE_MODE_QSGMII && 5295 phy_mode != PHY_INTERFACE_MODE_SGMII && 5296 !phy_interface_mode_is_8023z(phy_mode) && 5297 !phy_interface_mode_is_rgmii(phy_mode)) 5298 return -EINVAL; 5299 5300 return 0; 5301 } 5302 5303 /* Device initialization routine */ 5304 static int mvneta_probe(struct platform_device *pdev) 5305 { 5306 struct device_node *dn = pdev->dev.of_node; 5307 struct device_node *bm_node; 5308 struct mvneta_port *pp; 5309 struct net_device *dev; 5310 struct phylink *phylink; 5311 struct phy *comphy; 5312 char hw_mac_addr[ETH_ALEN]; 5313 phy_interface_t phy_mode; 5314 const char *mac_from; 5315 int tx_csum_limit; 5316 int err; 5317 int cpu; 5318 5319 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), 5320 txq_number, rxq_number); 5321 if (!dev) 5322 return -ENOMEM; 5323 5324 dev->irq = irq_of_parse_and_map(dn, 0); 5325 if (dev->irq == 0) 5326 return -EINVAL; 5327 5328 err = of_get_phy_mode(dn, &phy_mode); 5329 if (err) { 5330 dev_err(&pdev->dev, "incorrect phy-mode\n"); 5331 goto err_free_irq; 5332 } 5333 5334 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); 5335 if (comphy == ERR_PTR(-EPROBE_DEFER)) { 5336 err = -EPROBE_DEFER; 5337 goto err_free_irq; 5338 } else if (IS_ERR(comphy)) { 5339 comphy = NULL; 5340 } 5341 5342 pp = netdev_priv(dev); 5343 spin_lock_init(&pp->lock); 5344 5345 pp->phylink_config.dev = &dev->dev; 5346 pp->phylink_config.type = PHYLINK_NETDEV; 5347 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | 5348 MAC_100 | MAC_1000FD | MAC_2500FD; 5349 5350 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); 5351 __set_bit(PHY_INTERFACE_MODE_QSGMII, 5352 pp->phylink_config.supported_interfaces); 5353 if (comphy) { 5354 /* If a COMPHY is present, we can support any of the serdes 5355 * modes and switch between them. 5356 */ 5357 __set_bit(PHY_INTERFACE_MODE_SGMII, 5358 pp->phylink_config.supported_interfaces); 5359 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5360 pp->phylink_config.supported_interfaces); 5361 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5362 pp->phylink_config.supported_interfaces); 5363 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { 5364 /* No COMPHY, with only 2500BASE-X mode supported */ 5365 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5366 pp->phylink_config.supported_interfaces); 5367 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || 5368 phy_mode == PHY_INTERFACE_MODE_SGMII) { 5369 /* No COMPHY, we can switch between 1000BASE-X and SGMII */ 5370 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5371 pp->phylink_config.supported_interfaces); 5372 __set_bit(PHY_INTERFACE_MODE_SGMII, 5373 pp->phylink_config.supported_interfaces); 5374 } 5375 5376 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, 5377 phy_mode, &mvneta_phylink_ops); 5378 if (IS_ERR(phylink)) { 5379 err = PTR_ERR(phylink); 5380 goto err_free_irq; 5381 } 5382 5383 dev->tx_queue_len = MVNETA_MAX_TXD; 5384 dev->watchdog_timeo = 5 * HZ; 5385 dev->netdev_ops = &mvneta_netdev_ops; 5386 5387 dev->ethtool_ops = &mvneta_eth_tool_ops; 5388 5389 pp->phylink = phylink; 5390 pp->comphy = comphy; 5391 pp->phy_interface = phy_mode; 5392 pp->dn = dn; 5393 5394 pp->rxq_def = rxq_def; 5395 pp->indir[0] = rxq_def; 5396 5397 /* Get special SoC configurations */ 5398 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) 5399 pp->neta_armada3700 = true; 5400 5401 pp->clk = devm_clk_get(&pdev->dev, "core"); 5402 if (IS_ERR(pp->clk)) 5403 pp->clk = devm_clk_get(&pdev->dev, NULL); 5404 if (IS_ERR(pp->clk)) { 5405 err = PTR_ERR(pp->clk); 5406 goto err_free_phylink; 5407 } 5408 5409 clk_prepare_enable(pp->clk); 5410 5411 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); 5412 if (!IS_ERR(pp->clk_bus)) 5413 clk_prepare_enable(pp->clk_bus); 5414 5415 pp->base = devm_platform_ioremap_resource(pdev, 0); 5416 if (IS_ERR(pp->base)) { 5417 err = PTR_ERR(pp->base); 5418 goto err_clk; 5419 } 5420 5421 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; 5422 phylink_set_pcs(phylink, &pp->phylink_pcs); 5423 5424 /* Alloc per-cpu port structure */ 5425 pp->ports = alloc_percpu(struct mvneta_pcpu_port); 5426 if (!pp->ports) { 5427 err = -ENOMEM; 5428 goto err_clk; 5429 } 5430 5431 /* Alloc per-cpu stats */ 5432 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); 5433 if (!pp->stats) { 5434 err = -ENOMEM; 5435 goto err_free_ports; 5436 } 5437 5438 err = of_get_ethdev_address(dn, dev); 5439 if (!err) { 5440 mac_from = "device tree"; 5441 } else { 5442 mvneta_get_mac_addr(pp, hw_mac_addr); 5443 if (is_valid_ether_addr(hw_mac_addr)) { 5444 mac_from = "hardware"; 5445 eth_hw_addr_set(dev, hw_mac_addr); 5446 } else { 5447 mac_from = "random"; 5448 eth_hw_addr_random(dev); 5449 } 5450 } 5451 5452 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { 5453 if (tx_csum_limit < 0 || 5454 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { 5455 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5456 dev_info(&pdev->dev, 5457 "Wrong TX csum limit in DT, set to %dB\n", 5458 MVNETA_TX_CSUM_DEF_SIZE); 5459 } 5460 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { 5461 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5462 } else { 5463 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; 5464 } 5465 5466 pp->tx_csum_limit = tx_csum_limit; 5467 5468 pp->dram_target_info = mv_mbus_dram_info(); 5469 /* Armada3700 requires setting default configuration of Mbus 5470 * windows, however without using filled mbus_dram_target_info 5471 * structure. 5472 */ 5473 if (pp->dram_target_info || pp->neta_armada3700) 5474 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5475 5476 pp->tx_ring_size = MVNETA_MAX_TXD; 5477 pp->rx_ring_size = MVNETA_MAX_RXD; 5478 5479 pp->dev = dev; 5480 SET_NETDEV_DEV(dev, &pdev->dev); 5481 5482 pp->id = global_port_id++; 5483 5484 /* Obtain access to BM resources if enabled and already initialized */ 5485 bm_node = of_parse_phandle(dn, "buffer-manager", 0); 5486 if (bm_node) { 5487 pp->bm_priv = mvneta_bm_get(bm_node); 5488 if (pp->bm_priv) { 5489 err = mvneta_bm_port_init(pdev, pp); 5490 if (err < 0) { 5491 dev_info(&pdev->dev, 5492 "use SW buffer management\n"); 5493 mvneta_bm_put(pp->bm_priv); 5494 pp->bm_priv = NULL; 5495 } 5496 } 5497 /* Set RX packet offset correction for platforms, whose 5498 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit 5499 * platforms and 0B for 32-bit ones. 5500 */ 5501 pp->rx_offset_correction = max(0, 5502 NET_SKB_PAD - 5503 MVNETA_RX_PKT_OFFSET_CORRECTION); 5504 } 5505 of_node_put(bm_node); 5506 5507 /* sw buffer management */ 5508 if (!pp->bm_priv) 5509 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5510 5511 err = mvneta_init(&pdev->dev, pp); 5512 if (err < 0) 5513 goto err_netdev; 5514 5515 err = mvneta_port_power_up(pp, pp->phy_interface); 5516 if (err < 0) { 5517 dev_err(&pdev->dev, "can't power up port\n"); 5518 goto err_netdev; 5519 } 5520 5521 /* Armada3700 network controller does not support per-cpu 5522 * operation, so only single NAPI should be initialized. 5523 */ 5524 if (pp->neta_armada3700) { 5525 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); 5526 } else { 5527 for_each_present_cpu(cpu) { 5528 struct mvneta_pcpu_port *port = 5529 per_cpu_ptr(pp->ports, cpu); 5530 5531 netif_napi_add(dev, &port->napi, mvneta_poll, 5532 NAPI_POLL_WEIGHT); 5533 port->pp = pp; 5534 } 5535 } 5536 5537 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5538 NETIF_F_TSO | NETIF_F_RXCSUM; 5539 dev->hw_features |= dev->features; 5540 dev->vlan_features |= dev->features; 5541 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 5542 netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS); 5543 5544 /* MTU range: 68 - 9676 */ 5545 dev->min_mtu = ETH_MIN_MTU; 5546 /* 9676 == 9700 - 20 and rounding to 8 */ 5547 dev->max_mtu = 9676; 5548 5549 err = register_netdev(dev); 5550 if (err < 0) { 5551 dev_err(&pdev->dev, "failed to register\n"); 5552 goto err_netdev; 5553 } 5554 5555 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 5556 dev->dev_addr); 5557 5558 platform_set_drvdata(pdev, pp->dev); 5559 5560 return 0; 5561 5562 err_netdev: 5563 if (pp->bm_priv) { 5564 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5565 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5566 1 << pp->id); 5567 mvneta_bm_put(pp->bm_priv); 5568 } 5569 free_percpu(pp->stats); 5570 err_free_ports: 5571 free_percpu(pp->ports); 5572 err_clk: 5573 clk_disable_unprepare(pp->clk_bus); 5574 clk_disable_unprepare(pp->clk); 5575 err_free_phylink: 5576 if (pp->phylink) 5577 phylink_destroy(pp->phylink); 5578 err_free_irq: 5579 irq_dispose_mapping(dev->irq); 5580 return err; 5581 } 5582 5583 /* Device removal routine */ 5584 static int mvneta_remove(struct platform_device *pdev) 5585 { 5586 struct net_device *dev = platform_get_drvdata(pdev); 5587 struct mvneta_port *pp = netdev_priv(dev); 5588 5589 unregister_netdev(dev); 5590 clk_disable_unprepare(pp->clk_bus); 5591 clk_disable_unprepare(pp->clk); 5592 free_percpu(pp->ports); 5593 free_percpu(pp->stats); 5594 irq_dispose_mapping(dev->irq); 5595 phylink_destroy(pp->phylink); 5596 5597 if (pp->bm_priv) { 5598 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5599 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5600 1 << pp->id); 5601 mvneta_bm_put(pp->bm_priv); 5602 } 5603 5604 return 0; 5605 } 5606 5607 #ifdef CONFIG_PM_SLEEP 5608 static int mvneta_suspend(struct device *device) 5609 { 5610 int queue; 5611 struct net_device *dev = dev_get_drvdata(device); 5612 struct mvneta_port *pp = netdev_priv(dev); 5613 5614 if (!netif_running(dev)) 5615 goto clean_exit; 5616 5617 if (!pp->neta_armada3700) { 5618 spin_lock(&pp->lock); 5619 pp->is_stopped = true; 5620 spin_unlock(&pp->lock); 5621 5622 cpuhp_state_remove_instance_nocalls(online_hpstate, 5623 &pp->node_online); 5624 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5625 &pp->node_dead); 5626 } 5627 5628 rtnl_lock(); 5629 mvneta_stop_dev(pp); 5630 rtnl_unlock(); 5631 5632 for (queue = 0; queue < rxq_number; queue++) { 5633 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5634 5635 mvneta_rxq_drop_pkts(pp, rxq); 5636 } 5637 5638 for (queue = 0; queue < txq_number; queue++) { 5639 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5640 5641 mvneta_txq_hw_deinit(pp, txq); 5642 } 5643 5644 clean_exit: 5645 netif_device_detach(dev); 5646 clk_disable_unprepare(pp->clk_bus); 5647 clk_disable_unprepare(pp->clk); 5648 5649 return 0; 5650 } 5651 5652 static int mvneta_resume(struct device *device) 5653 { 5654 struct platform_device *pdev = to_platform_device(device); 5655 struct net_device *dev = dev_get_drvdata(device); 5656 struct mvneta_port *pp = netdev_priv(dev); 5657 int err, queue; 5658 5659 clk_prepare_enable(pp->clk); 5660 if (!IS_ERR(pp->clk_bus)) 5661 clk_prepare_enable(pp->clk_bus); 5662 if (pp->dram_target_info || pp->neta_armada3700) 5663 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5664 if (pp->bm_priv) { 5665 err = mvneta_bm_port_init(pdev, pp); 5666 if (err < 0) { 5667 dev_info(&pdev->dev, "use SW buffer management\n"); 5668 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5669 pp->bm_priv = NULL; 5670 } 5671 } 5672 mvneta_defaults_set(pp); 5673 err = mvneta_port_power_up(pp, pp->phy_interface); 5674 if (err < 0) { 5675 dev_err(device, "can't power up port\n"); 5676 return err; 5677 } 5678 5679 netif_device_attach(dev); 5680 5681 if (!netif_running(dev)) 5682 return 0; 5683 5684 for (queue = 0; queue < rxq_number; queue++) { 5685 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5686 5687 rxq->next_desc_to_proc = 0; 5688 mvneta_rxq_hw_init(pp, rxq); 5689 } 5690 5691 for (queue = 0; queue < txq_number; queue++) { 5692 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5693 5694 txq->next_desc_to_proc = 0; 5695 mvneta_txq_hw_init(pp, txq); 5696 } 5697 5698 if (!pp->neta_armada3700) { 5699 spin_lock(&pp->lock); 5700 pp->is_stopped = false; 5701 spin_unlock(&pp->lock); 5702 cpuhp_state_add_instance_nocalls(online_hpstate, 5703 &pp->node_online); 5704 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5705 &pp->node_dead); 5706 } 5707 5708 rtnl_lock(); 5709 mvneta_start_dev(pp); 5710 rtnl_unlock(); 5711 mvneta_set_rx_mode(dev); 5712 5713 return 0; 5714 } 5715 #endif 5716 5717 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); 5718 5719 static const struct of_device_id mvneta_match[] = { 5720 { .compatible = "marvell,armada-370-neta" }, 5721 { .compatible = "marvell,armada-xp-neta" }, 5722 { .compatible = "marvell,armada-3700-neta" }, 5723 { } 5724 }; 5725 MODULE_DEVICE_TABLE(of, mvneta_match); 5726 5727 static struct platform_driver mvneta_driver = { 5728 .probe = mvneta_probe, 5729 .remove = mvneta_remove, 5730 .driver = { 5731 .name = MVNETA_DRIVER_NAME, 5732 .of_match_table = mvneta_match, 5733 .pm = &mvneta_pm_ops, 5734 }, 5735 }; 5736 5737 static int __init mvneta_driver_init(void) 5738 { 5739 int ret; 5740 5741 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online", 5742 mvneta_cpu_online, 5743 mvneta_cpu_down_prepare); 5744 if (ret < 0) 5745 goto out; 5746 online_hpstate = ret; 5747 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", 5748 NULL, mvneta_cpu_dead); 5749 if (ret) 5750 goto err_dead; 5751 5752 ret = platform_driver_register(&mvneta_driver); 5753 if (ret) 5754 goto err; 5755 return 0; 5756 5757 err: 5758 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5759 err_dead: 5760 cpuhp_remove_multi_state(online_hpstate); 5761 out: 5762 return ret; 5763 } 5764 module_init(mvneta_driver_init); 5765 5766 static void __exit mvneta_driver_exit(void) 5767 { 5768 platform_driver_unregister(&mvneta_driver); 5769 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5770 cpuhp_remove_multi_state(online_hpstate); 5771 } 5772 module_exit(mvneta_driver_exit); 5773 5774 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 5775 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 5776 MODULE_LICENSE("GPL"); 5777 5778 module_param(rxq_number, int, 0444); 5779 module_param(txq_number, int, 0444); 5780 5781 module_param(rxq_def, int, 0444); 5782 module_param(rx_copybreak, int, 0644); 5783