/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE	0x2294
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
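/* Default port configuration: the MVNETA_DEF_RXQ_* fields steer ARP, TCP,
 * UDP and BPDU traffic to RX queue q, and the RX checksum offload includes
 * the pseudo header.
 */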
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)	 | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_VLAN_PRIO_TO_RXQ		0x2440
#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq)	((rxq) << ((prio) * 3))
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(0)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_HSGMII_SERDES_PROTO	0x1107
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_BM_ADDRESS		0x2504
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is not
 * set, then a read of the register from this CPU will always return
 * 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X	BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AN_COMPLETE		BIT(11)
#define MVNETA_GMAC_SYNC_OK		BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE	BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN	BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL	BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL	BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_GMAC_CTRL_4		0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE	BIT(1)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
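/* Destination address filter tables: unicast, special multicast and
 * other multicast.
 */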
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_DEC_SENT_MASK	0xff
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_LPI_CTRL_0		0x2cc0
#define MVNETA_LPI_CTRL_1		0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE	BIT(0)
#define MVNETA_LPI_CTRL_2		0x2cc8
#define MVNETA_LPI_STATUS		0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is automatically filled with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically align the IP header on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

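/* Total RX buffer size needed for a given MTU: the payload plus the Marvell
 * header, an optional VLAN tag, the Ethernet header and FCS, rounded up to
 * the cache line size.
 */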
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      cache_line_size())

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
			 MVNETA_SKB_HEADROOM))
#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1

#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
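/* Per-CPU software counters for the RX/TX and XDP fast paths, folded into
 * the ethtool statistics.
 */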
struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
	u64	xdp_xmit;
	u64	xdp_xmit_err;
	u64	xdp_tx;
	u64	xdp_tx_err;
};

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64	skb_alloc_error;
	u64	refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	u64	rx_dropped;
	u64	rx_errors;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

enum {
	__MVNETA_DOWN,
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	unsigned long state;

	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	struct bpf_prog *xdp_prog;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	u8 prio_tc_map[8];

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct phy *comphy;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
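/* Note: the field order below differs between little- and big-endian builds
 * so that, together with the MVNETA_DESC_SWAP SDMA setting, the in-memory
 * descriptor layout seen by the hardware stays the same in both cases.
 */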
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
};
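/* Per RX queue state: the descriptor ring, the page_pool backing the RX
 * buffers (also used for XDP), and refill bookkeeping.
 */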
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
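		/* Snapshot the per-CPU counters, retrying if a concurrent
		 * writer updated them while we were reading.
		 */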
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes   = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors  = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes   = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors  += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->tx_dropped	= dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor without both
 * its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of rx descriptors; called upon return from the rx path
 * or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
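/* The GMAC max RX size field is expressed in units of 2 bytes and does not
 * include the two-byte Marvell header, hence the conversion below.
 */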
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure MBUS window in order to enable access to BM internal SRAM */
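/* The driver treats a set bit in MVNETA_BASE_ADDR_ENABLE as a free window
 * and clears it once the window has been configured.
 */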
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure the buffer
 * manager will remain disabled for the current port.
 */
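/* The long pool id comes from the "bm,pool-long" device tree property;
 * the short pool falls back to the long pool when "bm,pool-short" is absent.
 */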
1116 */ 1117 static int mvneta_bm_port_init(struct platform_device *pdev, 1118 struct mvneta_port *pp) 1119 { 1120 struct device_node *dn = pdev->dev.of_node; 1121 u32 long_pool_id, short_pool_id; 1122 1123 if (!pp->neta_armada3700) { 1124 int ret; 1125 1126 ret = mvneta_bm_port_mbus_init(pp); 1127 if (ret) 1128 return ret; 1129 } 1130 1131 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { 1132 netdev_info(pp->dev, "missing long pool id\n"); 1133 return -EINVAL; 1134 } 1135 1136 /* Create port's long pool depending on mtu */ 1137 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, 1138 MVNETA_BM_LONG, pp->id, 1139 MVNETA_RX_PKT_SIZE(pp->dev->mtu)); 1140 if (!pp->pool_long) { 1141 netdev_info(pp->dev, "fail to obtain long pool for port\n"); 1142 return -ENOMEM; 1143 } 1144 1145 pp->pool_long->port_map |= 1 << pp->id; 1146 1147 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, 1148 pp->pool_long->id); 1149 1150 /* If short pool id is not defined, assume using single pool */ 1151 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) 1152 short_pool_id = long_pool_id; 1153 1154 /* Create port's short pool */ 1155 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, 1156 MVNETA_BM_SHORT, pp->id, 1157 MVNETA_BM_SHORT_PKT_SIZE); 1158 if (!pp->pool_short) { 1159 netdev_info(pp->dev, "fail to obtain short pool for port\n"); 1160 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1161 return -ENOMEM; 1162 } 1163 1164 if (short_pool_id != long_pool_id) { 1165 pp->pool_short->port_map |= 1 << pp->id; 1166 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, 1167 pp->pool_short->id); 1168 } 1169 1170 return 0; 1171 } 1172 1173 /* Update settings of a pool for bigger packets */ 1174 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) 1175 { 1176 struct mvneta_bm_pool *bm_pool = pp->pool_long; 1177 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; 1178 int num; 1179 1180 /* Release all buffers from long pool */ 1181 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); 1182 if (hwbm_pool->buf_num) { 1183 WARN(1, "cannot free all buffers in pool %d\n", 1184 bm_pool->id); 1185 goto bm_mtu_err; 1186 } 1187 1188 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); 1189 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); 1190 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1191 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); 1192 1193 /* Fill entire long pool */ 1194 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); 1195 if (num != hwbm_pool->size) { 1196 WARN(1, "pool %d: %d of %d allocated\n", 1197 bm_pool->id, num, hwbm_pool->size); 1198 goto bm_mtu_err; 1199 } 1200 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); 1201 1202 return; 1203 1204 bm_mtu_err: 1205 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 1206 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); 1207 1208 pp->bm_priv = NULL; 1209 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 1210 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); 1211 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); 1212 } 1213 1214 /* Start the Ethernet port RX and TX activity */ 1215 static void mvneta_port_up(struct mvneta_port *pp) 1216 { 1217 int queue; 1218 u32 q_map; 1219 1220 /* Enable all initialized TXs. 
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
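/* Each 32-bit filter table register (unicast and multicast alike) packs four
 * one-byte entries: bit 0 selects whether the frame is accepted and
 * bits 1-3 encode the destination RX queue.
 */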
/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register according to all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

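	/* Descriptors are byte swapped by the SDMA engine on big-endian
	 * kernels; packet data itself is never swapped.
	 */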
#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp,
				const unsigned char *addr, int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
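/* The register expects the delay in core clock cycles, hence the usec to
 * cycles conversion below using the core clock rate.
 */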
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	u32 status = rx_desc->status;

	/* update per-cpu counter */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK))
		return CHECKSUM_UNNECESSARY;

	return CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue, matching the first one found in <cause>.
 */
1821 */ 1822 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1823 u32 cause) 1824 { 1825 int queue = fls(cause) - 1; 1826 1827 return &pp->txqs[queue]; 1828 } 1829 1830 /* Free tx queue skbuffs */ 1831 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1832 struct mvneta_tx_queue *txq, int num, 1833 struct netdev_queue *nq, bool napi) 1834 { 1835 unsigned int bytes_compl = 0, pkts_compl = 0; 1836 struct xdp_frame_bulk bq; 1837 int i; 1838 1839 xdp_frame_bulk_init(&bq); 1840 1841 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 1842 1843 for (i = 0; i < num; i++) { 1844 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; 1845 struct mvneta_tx_desc *tx_desc = txq->descs + 1846 txq->txq_get_index; 1847 1848 mvneta_txq_inc_get(txq); 1849 1850 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && 1851 buf->type != MVNETA_TYPE_XDP_TX) 1852 dma_unmap_single(pp->dev->dev.parent, 1853 tx_desc->buf_phys_addr, 1854 tx_desc->data_size, DMA_TO_DEVICE); 1855 if (buf->type == MVNETA_TYPE_SKB && buf->skb) { 1856 bytes_compl += buf->skb->len; 1857 pkts_compl++; 1858 dev_kfree_skb_any(buf->skb); 1859 } else if (buf->type == MVNETA_TYPE_XDP_TX || 1860 buf->type == MVNETA_TYPE_XDP_NDO) { 1861 if (napi && buf->type == MVNETA_TYPE_XDP_TX) 1862 xdp_return_frame_rx_napi(buf->xdpf); 1863 else 1864 xdp_return_frame_bulk(buf->xdpf, &bq); 1865 } 1866 } 1867 xdp_flush_frame_bulk(&bq); 1868 1869 rcu_read_unlock(); 1870 1871 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); 1872 } 1873 1874 /* Handle end of transmission */ 1875 static void mvneta_txq_done(struct mvneta_port *pp, 1876 struct mvneta_tx_queue *txq) 1877 { 1878 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1879 int tx_done; 1880 1881 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1882 if (!tx_done) 1883 return; 1884 1885 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); 1886 1887 txq->count -= tx_done; 1888 1889 if (netif_tx_queue_stopped(nq)) { 1890 if (txq->count <= txq->tx_wake_threshold) 1891 netif_tx_wake_queue(nq); 1892 } 1893 } 1894 1895 /* Refill processing for SW buffer management */ 1896 /* Allocate page per descriptor */ 1897 static int mvneta_rx_refill(struct mvneta_port *pp, 1898 struct mvneta_rx_desc *rx_desc, 1899 struct mvneta_rx_queue *rxq, 1900 gfp_t gfp_mask) 1901 { 1902 dma_addr_t phys_addr; 1903 struct page *page; 1904 1905 page = page_pool_alloc_pages(rxq->page_pool, 1906 gfp_mask | __GFP_NOWARN); 1907 if (!page) 1908 return -ENOMEM; 1909 1910 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; 1911 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); 1912 1913 return 0; 1914 } 1915 1916 /* Handle tx checksum */ 1917 static u32 mvneta_skb_tx_csum(struct sk_buff *skb) 1918 { 1919 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1920 int ip_hdr_len = 0; 1921 __be16 l3_proto = vlan_get_protocol(skb); 1922 u8 l4_proto; 1923 1924 if (l3_proto == htons(ETH_P_IP)) { 1925 struct iphdr *ip4h = ip_hdr(skb); 1926 1927 /* Calculate IPv4 checksum and L4 checksum */ 1928 ip_hdr_len = ip4h->ihl; 1929 l4_proto = ip4h->protocol; 1930 } else if (l3_proto == htons(ETH_P_IPV6)) { 1931 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1932 1933 /* Read l4_protocol from one of IPv6 extra headers */ 1934 if (skb_network_header_len(skb) > 0) 1935 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1936 l4_proto = ip6h->nexthdr; 1937 } else 1938 return MVNETA_TX_L4_CSUM_NOT; 1939 1940 return mvneta_txq_desc_csum(skb_network_offset(skb), 1941 l3_proto, ip_hdr_len, l4_proto); 1942 } 1943 1944 return 
MVNETA_TX_L4_CSUM_NOT; 1945 } 1946 1947 /* Drop packets received by the RXQ and free buffers */ 1948 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1949 struct mvneta_rx_queue *rxq) 1950 { 1951 int rx_done, i; 1952 1953 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1954 if (rx_done) 1955 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1956 1957 if (pp->bm_priv) { 1958 for (i = 0; i < rx_done; i++) { 1959 struct mvneta_rx_desc *rx_desc = 1960 mvneta_rxq_next_desc_get(rxq); 1961 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 1962 struct mvneta_bm_pool *bm_pool; 1963 1964 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 1965 /* Return dropped buffer to the pool */ 1966 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 1967 rx_desc->buf_phys_addr); 1968 } 1969 return; 1970 } 1971 1972 for (i = 0; i < rxq->size; i++) { 1973 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 1974 void *data = rxq->buf_virt_addr[i]; 1975 if (!data || !(rx_desc->buf_phys_addr)) 1976 continue; 1977 1978 page_pool_put_full_page(rxq->page_pool, data, false); 1979 } 1980 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 1981 xdp_rxq_info_unreg(&rxq->xdp_rxq); 1982 page_pool_destroy(rxq->page_pool); 1983 rxq->page_pool = NULL; 1984 } 1985 1986 static void 1987 mvneta_update_stats(struct mvneta_port *pp, 1988 struct mvneta_stats *ps) 1989 { 1990 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1991 1992 u64_stats_update_begin(&stats->syncp); 1993 stats->es.ps.rx_packets += ps->rx_packets; 1994 stats->es.ps.rx_bytes += ps->rx_bytes; 1995 /* xdp */ 1996 stats->es.ps.xdp_redirect += ps->xdp_redirect; 1997 stats->es.ps.xdp_pass += ps->xdp_pass; 1998 stats->es.ps.xdp_drop += ps->xdp_drop; 1999 u64_stats_update_end(&stats->syncp); 2000 } 2001 2002 static inline 2003 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) 2004 { 2005 struct mvneta_rx_desc *rx_desc; 2006 int curr_desc = rxq->first_to_refill; 2007 int i; 2008 2009 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { 2010 rx_desc = rxq->descs + curr_desc; 2011 if (!(rx_desc->buf_phys_addr)) { 2012 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { 2013 struct mvneta_pcpu_stats *stats; 2014 2015 pr_err("Can't refill queue %d. 
Done %d from %d\n", 2016 rxq->id, i, rxq->refill_num); 2017 2018 stats = this_cpu_ptr(pp->stats); 2019 u64_stats_update_begin(&stats->syncp); 2020 stats->es.refill_error++; 2021 u64_stats_update_end(&stats->syncp); 2022 break; 2023 } 2024 } 2025 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); 2026 } 2027 rxq->refill_num -= i; 2028 rxq->first_to_refill = curr_desc; 2029 2030 return i; 2031 } 2032 2033 static void 2034 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2035 struct xdp_buff *xdp, struct skb_shared_info *sinfo, 2036 int sync_len) 2037 { 2038 int i; 2039 2040 for (i = 0; i < sinfo->nr_frags; i++) 2041 page_pool_put_full_page(rxq->page_pool, 2042 skb_frag_page(&sinfo->frags[i]), true); 2043 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), 2044 sync_len, true); 2045 } 2046 2047 static int 2048 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2049 struct xdp_frame *xdpf, bool dma_map) 2050 { 2051 struct mvneta_tx_desc *tx_desc; 2052 struct mvneta_tx_buf *buf; 2053 dma_addr_t dma_addr; 2054 2055 if (txq->count >= txq->tx_stop_threshold) 2056 return MVNETA_XDP_DROPPED; 2057 2058 tx_desc = mvneta_txq_next_desc_get(txq); 2059 2060 buf = &txq->buf[txq->txq_put_index]; 2061 if (dma_map) { 2062 /* ndo_xdp_xmit */ 2063 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, 2064 xdpf->len, DMA_TO_DEVICE); 2065 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { 2066 mvneta_txq_desc_put(txq); 2067 return MVNETA_XDP_DROPPED; 2068 } 2069 buf->type = MVNETA_TYPE_XDP_NDO; 2070 } else { 2071 struct page *page = virt_to_page(xdpf->data); 2072 2073 dma_addr = page_pool_get_dma_addr(page) + 2074 sizeof(*xdpf) + xdpf->headroom; 2075 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, 2076 xdpf->len, DMA_BIDIRECTIONAL); 2077 buf->type = MVNETA_TYPE_XDP_TX; 2078 } 2079 buf->xdpf = xdpf; 2080 2081 tx_desc->command = MVNETA_TXD_FLZ_DESC; 2082 tx_desc->buf_phys_addr = dma_addr; 2083 tx_desc->data_size = xdpf->len; 2084 2085 mvneta_txq_inc_put(txq); 2086 txq->pending++; 2087 txq->count++; 2088 2089 return MVNETA_XDP_TX; 2090 } 2091 2092 static int 2093 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2094 { 2095 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2096 struct mvneta_tx_queue *txq; 2097 struct netdev_queue *nq; 2098 struct xdp_frame *xdpf; 2099 int cpu; 2100 u32 ret; 2101 2102 xdpf = xdp_convert_buff_to_frame(xdp); 2103 if (unlikely(!xdpf)) 2104 return MVNETA_XDP_DROPPED; 2105 2106 cpu = smp_processor_id(); 2107 txq = &pp->txqs[cpu % txq_number]; 2108 nq = netdev_get_tx_queue(pp->dev, txq->id); 2109 2110 __netif_tx_lock(nq, cpu); 2111 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); 2112 if (ret == MVNETA_XDP_TX) { 2113 u64_stats_update_begin(&stats->syncp); 2114 stats->es.ps.tx_bytes += xdpf->len; 2115 stats->es.ps.tx_packets++; 2116 stats->es.ps.xdp_tx++; 2117 u64_stats_update_end(&stats->syncp); 2118 2119 mvneta_txq_pend_desc_add(pp, txq, 0); 2120 } else { 2121 u64_stats_update_begin(&stats->syncp); 2122 stats->es.ps.xdp_tx_err++; 2123 u64_stats_update_end(&stats->syncp); 2124 } 2125 __netif_tx_unlock(nq); 2126 2127 return ret; 2128 } 2129 2130 static int 2131 mvneta_xdp_xmit(struct net_device *dev, int num_frame, 2132 struct xdp_frame **frames, u32 flags) 2133 { 2134 struct mvneta_port *pp = netdev_priv(dev); 2135 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2136 int i, nxmit_byte = 0, nxmit = 0; 2137 int cpu = smp_processor_id(); 2138 struct mvneta_tx_queue 
*txq; 2139 struct netdev_queue *nq; 2140 u32 ret; 2141 2142 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) 2143 return -ENETDOWN; 2144 2145 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2146 return -EINVAL; 2147 2148 txq = &pp->txqs[cpu % txq_number]; 2149 nq = netdev_get_tx_queue(pp->dev, txq->id); 2150 2151 __netif_tx_lock(nq, cpu); 2152 for (i = 0; i < num_frame; i++) { 2153 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); 2154 if (ret != MVNETA_XDP_TX) 2155 break; 2156 2157 nxmit_byte += frames[i]->len; 2158 nxmit++; 2159 } 2160 2161 if (unlikely(flags & XDP_XMIT_FLUSH)) 2162 mvneta_txq_pend_desc_add(pp, txq, 0); 2163 __netif_tx_unlock(nq); 2164 2165 u64_stats_update_begin(&stats->syncp); 2166 stats->es.ps.tx_bytes += nxmit_byte; 2167 stats->es.ps.tx_packets += nxmit; 2168 stats->es.ps.xdp_xmit += nxmit; 2169 stats->es.ps.xdp_xmit_err += num_frame - nxmit; 2170 u64_stats_update_end(&stats->syncp); 2171 2172 return nxmit; 2173 } 2174 2175 static int 2176 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2177 struct bpf_prog *prog, struct xdp_buff *xdp, 2178 u32 frame_sz, struct mvneta_stats *stats) 2179 { 2180 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2181 unsigned int len, data_len, sync; 2182 u32 ret, act; 2183 2184 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2185 data_len = xdp->data_end - xdp->data; 2186 act = bpf_prog_run_xdp(prog, xdp); 2187 2188 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 2189 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2190 sync = max(sync, len); 2191 2192 switch (act) { 2193 case XDP_PASS: 2194 stats->xdp_pass++; 2195 return MVNETA_XDP_PASS; 2196 case XDP_REDIRECT: { 2197 int err; 2198 2199 err = xdp_do_redirect(pp->dev, xdp, prog); 2200 if (unlikely(err)) { 2201 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2202 ret = MVNETA_XDP_DROPPED; 2203 } else { 2204 ret = MVNETA_XDP_REDIR; 2205 stats->xdp_redirect++; 2206 } 2207 break; 2208 } 2209 case XDP_TX: 2210 ret = mvneta_xdp_xmit_back(pp, xdp); 2211 if (ret != MVNETA_XDP_TX) 2212 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2213 break; 2214 default: 2215 bpf_warn_invalid_xdp_action(act); 2216 fallthrough; 2217 case XDP_ABORTED: 2218 trace_xdp_exception(pp->dev, prog, act); 2219 fallthrough; 2220 case XDP_DROP: 2221 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2222 ret = MVNETA_XDP_DROPPED; 2223 stats->xdp_drop++; 2224 break; 2225 } 2226 2227 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; 2228 stats->rx_packets++; 2229 2230 return ret; 2231 } 2232 2233 static void 2234 mvneta_swbm_rx_frame(struct mvneta_port *pp, 2235 struct mvneta_rx_desc *rx_desc, 2236 struct mvneta_rx_queue *rxq, 2237 struct xdp_buff *xdp, int *size, 2238 struct page *page) 2239 { 2240 unsigned char *data = page_address(page); 2241 int data_len = -MVNETA_MH_SIZE, len; 2242 struct net_device *dev = pp->dev; 2243 enum dma_data_direction dma_dir; 2244 struct skb_shared_info *sinfo; 2245 2246 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2247 len = MVNETA_MAX_RX_BUF_SIZE; 2248 data_len += len; 2249 } else { 2250 len = *size; 2251 data_len += len - ETH_FCS_LEN; 2252 } 2253 *size = *size - len; 2254 2255 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2256 dma_sync_single_for_cpu(dev->dev.parent, 2257 rx_desc->buf_phys_addr, 2258 len, dma_dir); 2259 2260 rx_desc->buf_phys_addr = 0; 2261 2262 /* Prefetch header */ 2263 prefetch(data); 2264 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + 
MVNETA_MH_SIZE, 2265 data_len, false); 2266 2267 sinfo = xdp_get_shared_info_from_buff(xdp); 2268 sinfo->nr_frags = 0; 2269 } 2270 2271 static void 2272 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, 2273 struct mvneta_rx_desc *rx_desc, 2274 struct mvneta_rx_queue *rxq, 2275 struct xdp_buff *xdp, int *size, 2276 struct skb_shared_info *xdp_sinfo, 2277 struct page *page) 2278 { 2279 struct net_device *dev = pp->dev; 2280 enum dma_data_direction dma_dir; 2281 int data_len, len; 2282 2283 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2284 len = MVNETA_MAX_RX_BUF_SIZE; 2285 data_len = len; 2286 } else { 2287 len = *size; 2288 data_len = len - ETH_FCS_LEN; 2289 } 2290 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2291 dma_sync_single_for_cpu(dev->dev.parent, 2292 rx_desc->buf_phys_addr, 2293 len, dma_dir); 2294 rx_desc->buf_phys_addr = 0; 2295 2296 if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { 2297 skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; 2298 2299 skb_frag_off_set(frag, pp->rx_offset_correction); 2300 skb_frag_size_set(frag, data_len); 2301 __skb_frag_set_page(frag, page); 2302 } else { 2303 page_pool_put_full_page(rxq->page_pool, page, true); 2304 } 2305 2306 /* last fragment */ 2307 if (len == *size) { 2308 struct skb_shared_info *sinfo; 2309 2310 sinfo = xdp_get_shared_info_from_buff(xdp); 2311 sinfo->nr_frags = xdp_sinfo->nr_frags; 2312 memcpy(sinfo->frags, xdp_sinfo->frags, 2313 sinfo->nr_frags * sizeof(skb_frag_t)); 2314 } 2315 *size -= len; 2316 } 2317 2318 static struct sk_buff * 2319 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, 2320 struct xdp_buff *xdp, u32 desc_status) 2321 { 2322 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2323 int i, num_frags = sinfo->nr_frags; 2324 struct sk_buff *skb; 2325 2326 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2327 if (!skb) 2328 return ERR_PTR(-ENOMEM); 2329 2330 skb_mark_for_recycle(skb); 2331 2332 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2333 skb_put(skb, xdp->data_end - xdp->data); 2334 skb->ip_summed = mvneta_rx_csum(pp, desc_status); 2335 2336 for (i = 0; i < num_frags; i++) { 2337 skb_frag_t *frag = &sinfo->frags[i]; 2338 2339 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2340 skb_frag_page(frag), skb_frag_off(frag), 2341 skb_frag_size(frag), PAGE_SIZE); 2342 } 2343 2344 return skb; 2345 } 2346 2347 /* Main rx processing when using software buffer management */ 2348 static int mvneta_rx_swbm(struct napi_struct *napi, 2349 struct mvneta_port *pp, int budget, 2350 struct mvneta_rx_queue *rxq) 2351 { 2352 int rx_proc = 0, rx_todo, refill, size = 0; 2353 struct net_device *dev = pp->dev; 2354 struct skb_shared_info sinfo; 2355 struct mvneta_stats ps = {}; 2356 struct bpf_prog *xdp_prog; 2357 u32 desc_status, frame_sz; 2358 struct xdp_buff xdp_buf; 2359 2360 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); 2361 xdp_buf.data_hard_start = NULL; 2362 2363 sinfo.nr_frags = 0; 2364 2365 /* Get number of received packets */ 2366 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); 2367 2368 xdp_prog = READ_ONCE(pp->xdp_prog); 2369 2370 /* Fairness NAPI loop */ 2371 while (rx_proc < budget && rx_proc < rx_todo) { 2372 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2373 u32 rx_status, index; 2374 struct sk_buff *skb; 2375 struct page *page; 2376 2377 index = rx_desc - rxq->descs; 2378 page = (struct page *)rxq->buf_virt_addr[index]; 2379 2380 rx_status = rx_desc->status; 2381 rx_proc++; 2382 rxq->refill_num++; 2383 2384 if (rx_status & 
MVNETA_RXD_FIRST_DESC) { 2385 /* Check errors only for FIRST descriptor */ 2386 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2387 mvneta_rx_error(pp, rx_desc); 2388 goto next; 2389 } 2390 2391 size = rx_desc->data_size; 2392 frame_sz = size - ETH_FCS_LEN; 2393 desc_status = rx_status; 2394 2395 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, 2396 &size, page); 2397 } else { 2398 if (unlikely(!xdp_buf.data_hard_start)) { 2399 rx_desc->buf_phys_addr = 0; 2400 page_pool_put_full_page(rxq->page_pool, page, 2401 true); 2402 goto next; 2403 } 2404 2405 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, 2406 &size, &sinfo, page); 2407 } /* Middle or Last descriptor */ 2408 2409 if (!(rx_status & MVNETA_RXD_LAST_DESC)) 2410 /* no last descriptor this time */ 2411 continue; 2412 2413 if (size) { 2414 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2415 goto next; 2416 } 2417 2418 if (xdp_prog && 2419 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) 2420 goto next; 2421 2422 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); 2423 if (IS_ERR(skb)) { 2424 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2425 2426 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2427 2428 u64_stats_update_begin(&stats->syncp); 2429 stats->es.skb_alloc_error++; 2430 stats->rx_dropped++; 2431 u64_stats_update_end(&stats->syncp); 2432 2433 goto next; 2434 } 2435 2436 ps.rx_bytes += skb->len; 2437 ps.rx_packets++; 2438 2439 skb->protocol = eth_type_trans(skb, dev); 2440 napi_gro_receive(napi, skb); 2441 next: 2442 xdp_buf.data_hard_start = NULL; 2443 sinfo.nr_frags = 0; 2444 } 2445 2446 if (xdp_buf.data_hard_start) 2447 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2448 2449 if (ps.xdp_redirect) 2450 xdp_do_flush_map(); 2451 2452 if (ps.rx_packets) 2453 mvneta_update_stats(pp, &ps); 2454 2455 /* return some buffers to hardware queue, one at a time is too slow */ 2456 refill = mvneta_rx_refill_queue(pp, rxq); 2457 2458 /* Update rxq management counters */ 2459 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); 2460 2461 return ps.rx_packets; 2462 } 2463 2464 /* Main rx processing when using hardware buffer management */ 2465 static int mvneta_rx_hwbm(struct napi_struct *napi, 2466 struct mvneta_port *pp, int rx_todo, 2467 struct mvneta_rx_queue *rxq) 2468 { 2469 struct net_device *dev = pp->dev; 2470 int rx_done; 2471 u32 rcvd_pkts = 0; 2472 u32 rcvd_bytes = 0; 2473 2474 /* Get number of received packets */ 2475 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 2476 2477 if (rx_todo > rx_done) 2478 rx_todo = rx_done; 2479 2480 rx_done = 0; 2481 2482 /* Fairness NAPI loop */ 2483 while (rx_done < rx_todo) { 2484 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2485 struct mvneta_bm_pool *bm_pool = NULL; 2486 struct sk_buff *skb; 2487 unsigned char *data; 2488 dma_addr_t phys_addr; 2489 u32 rx_status, frag_size; 2490 int rx_bytes, err; 2491 u8 pool_id; 2492 2493 rx_done++; 2494 rx_status = rx_desc->status; 2495 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 2496 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; 2497 phys_addr = rx_desc->buf_phys_addr; 2498 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2499 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 2500 2501 if (!mvneta_rxq_desc_is_first_last(rx_status) || 2502 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 2503 err_drop_frame_ret_pool: 2504 /* Return the buffer to the pool */ 2505 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2506 rx_desc->buf_phys_addr); 2507 err_drop_frame: 2508 
mvneta_rx_error(pp, rx_desc); 2509 /* leave the descriptor untouched */ 2510 continue; 2511 } 2512 2513 if (rx_bytes <= rx_copybreak) { 2514 /* better copy a small frame and not unmap the DMA region */ 2515 skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 2516 if (unlikely(!skb)) 2517 goto err_drop_frame_ret_pool; 2518 2519 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, 2520 rx_desc->buf_phys_addr, 2521 MVNETA_MH_SIZE + NET_SKB_PAD, 2522 rx_bytes, 2523 DMA_FROM_DEVICE); 2524 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, 2525 rx_bytes); 2526 2527 skb->protocol = eth_type_trans(skb, dev); 2528 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2529 napi_gro_receive(napi, skb); 2530 2531 rcvd_pkts++; 2532 rcvd_bytes += rx_bytes; 2533 2534 /* Return the buffer to the pool */ 2535 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2536 rx_desc->buf_phys_addr); 2537 2538 /* leave the descriptor and buffer untouched */ 2539 continue; 2540 } 2541 2542 /* Refill processing */ 2543 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); 2544 if (err) { 2545 struct mvneta_pcpu_stats *stats; 2546 2547 netdev_err(dev, "Linux processing - Can't refill\n"); 2548 2549 stats = this_cpu_ptr(pp->stats); 2550 u64_stats_update_begin(&stats->syncp); 2551 stats->es.refill_error++; 2552 u64_stats_update_end(&stats->syncp); 2553 2554 goto err_drop_frame_ret_pool; 2555 } 2556 2557 frag_size = bm_pool->hwbm_pool.frag_size; 2558 2559 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); 2560 2561 /* After refill old buffer has to be unmapped regardless 2562 * the skb is successfully built or not. 2563 */ 2564 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, 2565 bm_pool->buf_size, DMA_FROM_DEVICE); 2566 if (!skb) 2567 goto err_drop_frame; 2568 2569 rcvd_pkts++; 2570 rcvd_bytes += rx_bytes; 2571 2572 /* Linux processing */ 2573 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 2574 skb_put(skb, rx_bytes); 2575 2576 skb->protocol = eth_type_trans(skb, dev); 2577 skb->ip_summed = mvneta_rx_csum(pp, rx_status); 2578 2579 napi_gro_receive(napi, skb); 2580 } 2581 2582 if (rcvd_pkts) { 2583 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2584 2585 u64_stats_update_begin(&stats->syncp); 2586 stats->es.ps.rx_packets += rcvd_pkts; 2587 stats->es.ps.rx_bytes += rcvd_bytes; 2588 u64_stats_update_end(&stats->syncp); 2589 } 2590 2591 /* Update rxq management counters */ 2592 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 2593 2594 return rx_done; 2595 } 2596 2597 static inline void 2598 mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq) 2599 { 2600 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2601 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2602 struct mvneta_tx_desc *tx_desc; 2603 2604 tx_desc = mvneta_txq_next_desc_get(txq); 2605 tx_desc->data_size = hdr_len; 2606 tx_desc->command = mvneta_skb_tx_csum(skb); 2607 tx_desc->command |= MVNETA_TXD_F_DESC; 2608 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + 2609 txq->txq_put_index * TSO_HEADER_SIZE; 2610 buf->type = MVNETA_TYPE_SKB; 2611 buf->skb = NULL; 2612 2613 mvneta_txq_inc_put(txq); 2614 } 2615 2616 static inline int 2617 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 2618 struct sk_buff *skb, char *data, int size, 2619 bool last_tcp, bool is_last) 2620 { 2621 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2622 struct mvneta_tx_desc *tx_desc; 2623 2624 tx_desc = mvneta_txq_next_desc_get(txq); 2625 tx_desc->data_size = size; 2626 tx_desc->buf_phys_addr = 
dma_map_single(dev->dev.parent, data, 2627 size, DMA_TO_DEVICE); 2628 if (unlikely(dma_mapping_error(dev->dev.parent, 2629 tx_desc->buf_phys_addr))) { 2630 mvneta_txq_desc_put(txq); 2631 return -ENOMEM; 2632 } 2633 2634 tx_desc->command = 0; 2635 buf->type = MVNETA_TYPE_SKB; 2636 buf->skb = NULL; 2637 2638 if (last_tcp) { 2639 /* last descriptor in the TCP packet */ 2640 tx_desc->command = MVNETA_TXD_L_DESC; 2641 2642 /* last descriptor in SKB */ 2643 if (is_last) 2644 buf->skb = skb; 2645 } 2646 mvneta_txq_inc_put(txq); 2647 return 0; 2648 } 2649 2650 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 2651 struct mvneta_tx_queue *txq) 2652 { 2653 int hdr_len, total_len, data_left; 2654 int desc_count = 0; 2655 struct mvneta_port *pp = netdev_priv(dev); 2656 struct tso_t tso; 2657 int i; 2658 2659 /* Count needed descriptors */ 2660 if ((txq->count + tso_count_descs(skb)) >= txq->size) 2661 return 0; 2662 2663 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { 2664 pr_info("*** Is this even possible?\n"); 2665 return 0; 2666 } 2667 2668 /* Initialize the TSO handler, and prepare the first payload */ 2669 hdr_len = tso_start(skb, &tso); 2670 2671 total_len = skb->len - hdr_len; 2672 while (total_len > 0) { 2673 char *hdr; 2674 2675 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 2676 total_len -= data_left; 2677 desc_count++; 2678 2679 /* prepare packet headers: MAC + IP + TCP */ 2680 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; 2681 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 2682 2683 mvneta_tso_put_hdr(skb, txq); 2684 2685 while (data_left > 0) { 2686 int size; 2687 desc_count++; 2688 2689 size = min_t(int, tso.size, data_left); 2690 2691 if (mvneta_tso_put_data(dev, txq, skb, 2692 tso.data, size, 2693 size == data_left, 2694 total_len == 0)) 2695 goto err_release; 2696 data_left -= size; 2697 2698 tso_build_data(skb, &tso, size); 2699 } 2700 } 2701 2702 return desc_count; 2703 2704 err_release: 2705 /* Release all used data descriptors; header descriptors must not 2706 * be DMA-unmapped. 
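 * (TSO headers are carved out of the per-queue txq->tso_hdrs coherent buffer, which is why IS_TSO_HEADER() is checked below to skip dma_unmap_single() for them.)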
2707 */ 2708 for (i = desc_count - 1; i >= 0; i--) { 2709 struct mvneta_tx_desc *tx_desc = txq->descs + i; 2710 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) 2711 dma_unmap_single(pp->dev->dev.parent, 2712 tx_desc->buf_phys_addr, 2713 tx_desc->data_size, 2714 DMA_TO_DEVICE); 2715 mvneta_txq_desc_put(txq); 2716 } 2717 return 0; 2718 } 2719 2720 /* Handle tx fragmentation processing */ 2721 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 2722 struct mvneta_tx_queue *txq) 2723 { 2724 struct mvneta_tx_desc *tx_desc; 2725 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2726 2727 for (i = 0; i < nr_frags; i++) { 2728 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2729 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2730 void *addr = skb_frag_address(frag); 2731 2732 tx_desc = mvneta_txq_next_desc_get(txq); 2733 tx_desc->data_size = skb_frag_size(frag); 2734 2735 tx_desc->buf_phys_addr = 2736 dma_map_single(pp->dev->dev.parent, addr, 2737 tx_desc->data_size, DMA_TO_DEVICE); 2738 2739 if (dma_mapping_error(pp->dev->dev.parent, 2740 tx_desc->buf_phys_addr)) { 2741 mvneta_txq_desc_put(txq); 2742 goto error; 2743 } 2744 2745 if (i == nr_frags - 1) { 2746 /* Last descriptor */ 2747 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2748 buf->skb = skb; 2749 } else { 2750 /* Descriptor in the middle: Not First, Not Last */ 2751 tx_desc->command = 0; 2752 buf->skb = NULL; 2753 } 2754 buf->type = MVNETA_TYPE_SKB; 2755 mvneta_txq_inc_put(txq); 2756 } 2757 2758 return 0; 2759 2760 error: 2761 /* Release all descriptors that were used to map fragments of 2762 * this packet, as well as the corresponding DMA mappings 2763 */ 2764 for (i = i - 1; i >= 0; i--) { 2765 tx_desc = txq->descs + i; 2766 dma_unmap_single(pp->dev->dev.parent, 2767 tx_desc->buf_phys_addr, 2768 tx_desc->data_size, 2769 DMA_TO_DEVICE); 2770 mvneta_txq_desc_put(txq); 2771 } 2772 2773 return -ENOMEM; 2774 } 2775 2776 /* Main tx processing */ 2777 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) 2778 { 2779 struct mvneta_port *pp = netdev_priv(dev); 2780 u16 txq_id = skb_get_queue_mapping(skb); 2781 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 2782 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2783 struct mvneta_tx_desc *tx_desc; 2784 int len = skb->len; 2785 int frags = 0; 2786 u32 tx_cmd; 2787 2788 if (!netif_running(dev)) 2789 goto out; 2790 2791 if (skb_is_gso(skb)) { 2792 frags = mvneta_tx_tso(skb, dev, txq); 2793 goto out; 2794 } 2795 2796 frags = skb_shinfo(skb)->nr_frags + 1; 2797 2798 /* Get a descriptor for the first part of the packet */ 2799 tx_desc = mvneta_txq_next_desc_get(txq); 2800 2801 tx_cmd = mvneta_skb_tx_csum(skb); 2802 2803 tx_desc->data_size = skb_headlen(skb); 2804 2805 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 2806 tx_desc->data_size, 2807 DMA_TO_DEVICE); 2808 if (unlikely(dma_mapping_error(dev->dev.parent, 2809 tx_desc->buf_phys_addr))) { 2810 mvneta_txq_desc_put(txq); 2811 frags = 0; 2812 goto out; 2813 } 2814 2815 buf->type = MVNETA_TYPE_SKB; 2816 if (frags == 1) { 2817 /* First and Last descriptor */ 2818 tx_cmd |= MVNETA_TXD_FLZ_DESC; 2819 tx_desc->command = tx_cmd; 2820 buf->skb = skb; 2821 mvneta_txq_inc_put(txq); 2822 } else { 2823 /* First but not Last */ 2824 tx_cmd |= MVNETA_TXD_F_DESC; 2825 buf->skb = NULL; 2826 mvneta_txq_inc_put(txq); 2827 tx_desc->command = tx_cmd; 2828 /* Continue with other skb fragments */ 2829 if (mvneta_tx_frag_process(pp, skb, txq)) { 2830 
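/* Fragment mapping failed: unmap the linear part mapped just above before dropping the skb. */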
dma_unmap_single(dev->dev.parent, 2831 tx_desc->buf_phys_addr, 2832 tx_desc->data_size, 2833 DMA_TO_DEVICE); 2834 mvneta_txq_desc_put(txq); 2835 frags = 0; 2836 goto out; 2837 } 2838 } 2839 2840 out: 2841 if (frags > 0) { 2842 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 2843 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2844 2845 netdev_tx_sent_queue(nq, len); 2846 2847 txq->count += frags; 2848 if (txq->count >= txq->tx_stop_threshold) 2849 netif_tx_stop_queue(nq); 2850 2851 if (!netdev_xmit_more() || netif_xmit_stopped(nq) || 2852 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) 2853 mvneta_txq_pend_desc_add(pp, txq, frags); 2854 else 2855 txq->pending += frags; 2856 2857 u64_stats_update_begin(&stats->syncp); 2858 stats->es.ps.tx_bytes += len; 2859 stats->es.ps.tx_packets++; 2860 u64_stats_update_end(&stats->syncp); 2861 } else { 2862 dev->stats.tx_dropped++; 2863 dev_kfree_skb_any(skb); 2864 } 2865 2866 return NETDEV_TX_OK; 2867 } 2868 2869 2870 /* Free tx resources, when resetting a port */ 2871 static void mvneta_txq_done_force(struct mvneta_port *pp, 2872 struct mvneta_tx_queue *txq) 2873 2874 { 2875 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 2876 int tx_done = txq->count; 2877 2878 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); 2879 2880 /* reset txq */ 2881 txq->count = 0; 2882 txq->txq_put_index = 0; 2883 txq->txq_get_index = 0; 2884 } 2885 2886 /* Handle tx done - called in softirq context. The <cause_tx_done> argument 2887 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 2888 */ 2889 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 2890 { 2891 struct mvneta_tx_queue *txq; 2892 struct netdev_queue *nq; 2893 int cpu = smp_processor_id(); 2894 2895 while (cause_tx_done) { 2896 txq = mvneta_tx_done_policy(pp, cause_tx_done); 2897 2898 nq = netdev_get_tx_queue(pp->dev, txq->id); 2899 __netif_tx_lock(nq, cpu); 2900 2901 if (txq->count) 2902 mvneta_txq_done(pp, txq); 2903 2904 __netif_tx_unlock(nq); 2905 cause_tx_done &= ~((1 << txq->id)); 2906 } 2907 } 2908 2909 /* Compute crc8 of the specified address, using a unique algorithm , 2910 * according to hw spec, different than generic crc8 algorithm 2911 */ 2912 static int mvneta_addr_crc(unsigned char *addr) 2913 { 2914 int crc = 0; 2915 int i; 2916 2917 for (i = 0; i < ETH_ALEN; i++) { 2918 int j; 2919 2920 crc = (crc ^ addr[i]) << 8; 2921 for (j = 7; j >= 0; j--) { 2922 if (crc & (0x100 << j)) 2923 crc ^= 0x107 << j; 2924 } 2925 } 2926 2927 return crc; 2928 } 2929 2930 /* This method controls the net device special MAC multicast support. 2931 * The Special Multicast Table for MAC addresses supports MAC of the form 2932 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 2933 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 2934 * Table entries in the DA-Filter table. This method set the Special 2935 * Multicast Table appropriate entry. 
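 * For example, DA 01:00:5e:00:00:2a has last_byte == 0x2a, so it maps to SMC table register 0x2a / 4 == 10 and byte lane 0x2a % 4 == 2 within that register.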
2936 */ 2937 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 2938 unsigned char last_byte, 2939 int queue) 2940 { 2941 unsigned int smc_table_reg; 2942 unsigned int tbl_offset; 2943 unsigned int reg_offset; 2944 2945 /* Register offset from SMC table base */ 2946 tbl_offset = (last_byte / 4); 2947 /* Entry offset within the above reg */ 2948 reg_offset = last_byte % 4; 2949 2950 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 2951 + tbl_offset * 4)); 2952 2953 if (queue == -1) 2954 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2955 else { 2956 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2957 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 2958 } 2959 2960 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 2961 smc_table_reg); 2962 } 2963 2964 /* This method controls the network device Other MAC multicast support. 2965 * The Other Multicast Table is used for multicast of another type. 2966 * A CRC-8 is used as an index to the Other Multicast Table entries 2967 * in the DA-Filter table. 2968 * The method gets the CRC-8 value from the calling routine and 2969 * sets the Other Multicast Table appropriate entry according to the 2970 * specified CRC-8 . 2971 */ 2972 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 2973 unsigned char crc8, 2974 int queue) 2975 { 2976 unsigned int omc_table_reg; 2977 unsigned int tbl_offset; 2978 unsigned int reg_offset; 2979 2980 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 2981 reg_offset = crc8 % 4; /* Entry offset within the above reg */ 2982 2983 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 2984 2985 if (queue == -1) { 2986 /* Clear accepts frame bit at specified Other DA table entry */ 2987 omc_table_reg &= ~(0xff << (8 * reg_offset)); 2988 } else { 2989 omc_table_reg &= ~(0xff << (8 * reg_offset)); 2990 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 2991 } 2992 2993 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 2994 } 2995 2996 /* The network device supports multicast using two tables: 2997 * 1) Special Multicast Table for MAC addresses of the form 2998 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 2999 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3000 * Table entries in the DA-Filter table. 3001 * 2) Other Multicast Table for multicast of another type. A CRC-8 value 3002 * is used as an index to the Other Multicast Table entries in the 3003 * DA-Filter table. 
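 * In both tables a queue of -1 clears the entry; for the Other Multicast Table a per-CRC reference count (pp->mcast_count[]) is kept, so an entry is only cleared once the last address hashing to that CRC-8 has been removed.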
3004 */ 3005 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, 3006 int queue) 3007 { 3008 unsigned char crc_result = 0; 3009 3010 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { 3011 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); 3012 return 0; 3013 } 3014 3015 crc_result = mvneta_addr_crc(p_addr); 3016 if (queue == -1) { 3017 if (pp->mcast_count[crc_result] == 0) { 3018 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", 3019 crc_result); 3020 return -EINVAL; 3021 } 3022 3023 pp->mcast_count[crc_result]--; 3024 if (pp->mcast_count[crc_result] != 0) { 3025 netdev_info(pp->dev, 3026 "After delete there are %d valid Mcast for crc8=0x%02x\n", 3027 pp->mcast_count[crc_result], crc_result); 3028 return -EINVAL; 3029 } 3030 } else 3031 pp->mcast_count[crc_result]++; 3032 3033 mvneta_set_other_mcast_addr(pp, crc_result, queue); 3034 3035 return 0; 3036 } 3037 3038 /* Configure Filtering mode of Ethernet port */ 3039 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, 3040 int is_promisc) 3041 { 3042 u32 port_cfg_reg, val; 3043 3044 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); 3045 3046 val = mvreg_read(pp, MVNETA_TYPE_PRIO); 3047 3048 /* Set / Clear UPM bit in port configuration register */ 3049 if (is_promisc) { 3050 /* Accept all Unicast addresses */ 3051 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; 3052 val |= MVNETA_FORCE_UNI; 3053 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); 3054 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); 3055 } else { 3056 /* Reject all Unicast addresses */ 3057 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; 3058 val &= ~MVNETA_FORCE_UNI; 3059 } 3060 3061 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); 3062 mvreg_write(pp, MVNETA_TYPE_PRIO, val); 3063 } 3064 3065 /* register unicast and multicast addresses */ 3066 static void mvneta_set_rx_mode(struct net_device *dev) 3067 { 3068 struct mvneta_port *pp = netdev_priv(dev); 3069 struct netdev_hw_addr *ha; 3070 3071 if (dev->flags & IFF_PROMISC) { 3072 /* Accept all: Multicast + Unicast */ 3073 mvneta_rx_unicast_promisc_set(pp, 1); 3074 mvneta_set_ucast_table(pp, pp->rxq_def); 3075 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3076 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3077 } else { 3078 /* Accept single Unicast */ 3079 mvneta_rx_unicast_promisc_set(pp, 0); 3080 mvneta_set_ucast_table(pp, -1); 3081 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); 3082 3083 if (dev->flags & IFF_ALLMULTI) { 3084 /* Accept all multicast */ 3085 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3086 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3087 } else { 3088 /* Accept only initialized multicast */ 3089 mvneta_set_special_mcast_table(pp, -1); 3090 mvneta_set_other_mcast_table(pp, -1); 3091 3092 if (!netdev_mc_empty(dev)) { 3093 netdev_for_each_mc_addr(ha, dev) { 3094 mvneta_mcast_addr_set(pp, ha->addr, 3095 pp->rxq_def); 3096 } 3097 } 3098 } 3099 } 3100 } 3101 3102 /* Interrupt handling - the callback for request_irq() */ 3103 static irqreturn_t mvneta_isr(int irq, void *dev_id) 3104 { 3105 struct mvneta_port *pp = (struct mvneta_port *)dev_id; 3106 3107 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 3108 napi_schedule(&pp->napi); 3109 3110 return IRQ_HANDLED; 3111 } 3112 3113 /* Interrupt handling - the callback for request_percpu_irq() */ 3114 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) 3115 { 3116 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; 3117 3118 disable_percpu_irq(port->pp->dev->irq); 3119 napi_schedule(&port->napi);
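/* The per-CPU interrupt stays disabled until mvneta_poll() re-enables it once the NAPI budget is no longer exhausted. */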
3120 3121 return IRQ_HANDLED; 3122 } 3123 3124 static void mvneta_link_change(struct mvneta_port *pp) 3125 { 3126 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3127 3128 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); 3129 } 3130 3131 /* NAPI handler 3132 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted 3133 * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 3134 * Bits 8 -15 of the cause Rx Tx register indicate that are received 3135 * packets on the corresponding RXQ (Bit 8 is for RX queue 0). 3136 * Each CPU has its own causeRxTx register 3137 */ 3138 static int mvneta_poll(struct napi_struct *napi, int budget) 3139 { 3140 int rx_done = 0; 3141 u32 cause_rx_tx; 3142 int rx_queue; 3143 struct mvneta_port *pp = netdev_priv(napi->dev); 3144 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 3145 3146 if (!netif_running(pp->dev)) { 3147 napi_complete(napi); 3148 return rx_done; 3149 } 3150 3151 /* Read cause register */ 3152 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 3153 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 3154 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 3155 3156 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 3157 3158 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | 3159 MVNETA_CAUSE_LINK_CHANGE)) 3160 mvneta_link_change(pp); 3161 } 3162 3163 /* Release Tx descriptors */ 3164 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 3165 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 3166 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 3167 } 3168 3169 /* For the case where the last mvneta_poll did not process all 3170 * RX packets 3171 */ 3172 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : 3173 port->cause_rx_tx; 3174 3175 rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 3176 if (rx_queue) { 3177 rx_queue = rx_queue - 1; 3178 if (pp->bm_priv) 3179 rx_done = mvneta_rx_hwbm(napi, pp, budget, 3180 &pp->rxqs[rx_queue]); 3181 else 3182 rx_done = mvneta_rx_swbm(napi, pp, budget, 3183 &pp->rxqs[rx_queue]); 3184 } 3185 3186 if (rx_done < budget) { 3187 cause_rx_tx = 0; 3188 napi_complete_done(napi, rx_done); 3189 3190 if (pp->neta_armada3700) { 3191 unsigned long flags; 3192 3193 local_irq_save(flags); 3194 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 3195 MVNETA_RX_INTR_MASK(rxq_number) | 3196 MVNETA_TX_INTR_MASK(txq_number) | 3197 MVNETA_MISCINTR_INTR_MASK); 3198 local_irq_restore(flags); 3199 } else { 3200 enable_percpu_irq(pp->dev->irq, 0); 3201 } 3202 } 3203 3204 if (pp->neta_armada3700) 3205 pp->cause_rx_tx = cause_rx_tx; 3206 else 3207 port->cause_rx_tx = cause_rx_tx; 3208 3209 return rx_done; 3210 } 3211 3212 static int mvneta_create_page_pool(struct mvneta_port *pp, 3213 struct mvneta_rx_queue *rxq, int size) 3214 { 3215 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); 3216 struct page_pool_params pp_params = { 3217 .order = 0, 3218 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 3219 .pool_size = size, 3220 .nid = NUMA_NO_NODE, 3221 .dev = pp->dev->dev.parent, 3222 .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 3223 .offset = pp->rx_offset_correction, 3224 .max_len = MVNETA_MAX_RX_BUF_SIZE, 3225 }; 3226 int err; 3227 3228 rxq->page_pool = page_pool_create(&pp_params); 3229 if (IS_ERR(rxq->page_pool)) { 3230 err = PTR_ERR(rxq->page_pool); 3231 rxq->page_pool = NULL; 3232 return err; 3233 } 3234 3235 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); 3236 if (err < 0) 3237 goto err_free_pp; 3238 3239 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 3240 rxq->page_pool); 3241 if (err) 3242 goto err_unregister_rxq; 3243 3244 return 0; 3245 3246 err_unregister_rxq: 3247 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3248 err_free_pp: 3249 page_pool_destroy(rxq->page_pool); 3250 rxq->page_pool = NULL; 3251 return err; 3252 } 3253 3254 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 3255 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 3256 int num) 3257 { 3258 int i, err; 3259 3260 err = mvneta_create_page_pool(pp, rxq, num); 3261 if (err < 0) 3262 return err; 3263 3264 for (i = 0; i < num; i++) { 3265 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 3266 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, 3267 GFP_KERNEL) != 0) { 3268 netdev_err(pp->dev, 3269 "%s:rxq %d, %d of %d buffs filled\n", 3270 __func__, rxq->id, i, num); 3271 break; 3272 } 3273 } 3274 3275 /* Add this number of RX descriptors as non occupied (ready to 3276 * get packets) 3277 */ 3278 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 3279 3280 return i; 3281 } 3282 3283 /* Free all packets pending transmit from all TXQs and reset TX port */ 3284 static void mvneta_tx_reset(struct mvneta_port *pp) 3285 { 3286 int queue; 3287 3288 /* free the skb's in the tx ring */ 3289 for (queue = 0; queue < txq_number; queue++) 3290 mvneta_txq_done_force(pp, &pp->txqs[queue]); 3291 3292 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 3293 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 3294 } 3295 3296 static void mvneta_rx_reset(struct mvneta_port *pp) 3297 { 3298 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 3299 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 3300 } 3301 3302 /* Rx/Tx queue initialization/cleanup methods */ 3303 3304 static int mvneta_rxq_sw_init(struct mvneta_port *pp, 3305 struct mvneta_rx_queue *rxq) 3306 { 3307 rxq->size = pp->rx_ring_size; 3308 3309 /* Allocate memory for RX descriptors */ 3310 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3311 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3312 &rxq->descs_phys, GFP_KERNEL); 3313 if (!rxq->descs) 3314 return -ENOMEM; 3315 3316 rxq->last_desc = rxq->size - 1; 3317 3318 return 0; 3319 } 3320 3321 static void mvneta_rxq_hw_init(struct mvneta_port *pp, 3322 struct mvneta_rx_queue *rxq) 3323 { 3324 /* Set Rx descriptors queue starting address */ 3325 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 3326 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 3327 3328 /* Set coalescing pkts and time */ 3329 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 3330 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 3331 3332 if (!pp->bm_priv) { 3333 /* Set Offset */ 3334 mvneta_rxq_offset_set(pp, rxq, 0); 3335 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
3336 MVNETA_MAX_RX_BUF_SIZE : 3337 MVNETA_RX_BUF_SIZE(pp->pkt_size)); 3338 mvneta_rxq_bm_disable(pp, rxq); 3339 mvneta_rxq_fill(pp, rxq, rxq->size); 3340 } else { 3341 /* Set Offset */ 3342 mvneta_rxq_offset_set(pp, rxq, 3343 NET_SKB_PAD - pp->rx_offset_correction); 3344 3345 mvneta_rxq_bm_enable(pp, rxq); 3346 /* Fill RXQ with buffers from RX pool */ 3347 mvneta_rxq_long_pool_set(pp, rxq); 3348 mvneta_rxq_short_pool_set(pp, rxq); 3349 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); 3350 } 3351 } 3352 3353 /* Create a specified RX queue */ 3354 static int mvneta_rxq_init(struct mvneta_port *pp, 3355 struct mvneta_rx_queue *rxq) 3356 3357 { 3358 int ret; 3359 3360 ret = mvneta_rxq_sw_init(pp, rxq); 3361 if (ret < 0) 3362 return ret; 3363 3364 mvneta_rxq_hw_init(pp, rxq); 3365 3366 return 0; 3367 } 3368 3369 /* Cleanup Rx queue */ 3370 static void mvneta_rxq_deinit(struct mvneta_port *pp, 3371 struct mvneta_rx_queue *rxq) 3372 { 3373 mvneta_rxq_drop_pkts(pp, rxq); 3374 3375 if (rxq->descs) 3376 dma_free_coherent(pp->dev->dev.parent, 3377 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3378 rxq->descs, 3379 rxq->descs_phys); 3380 3381 rxq->descs = NULL; 3382 rxq->last_desc = 0; 3383 rxq->next_desc_to_proc = 0; 3384 rxq->descs_phys = 0; 3385 rxq->first_to_refill = 0; 3386 rxq->refill_num = 0; 3387 } 3388 3389 static int mvneta_txq_sw_init(struct mvneta_port *pp, 3390 struct mvneta_tx_queue *txq) 3391 { 3392 int cpu; 3393 3394 txq->size = pp->tx_ring_size; 3395 3396 /* A queue must always have room for at least one skb. 3397 * Therefore, stop the queue when the free entries reaches 3398 * the maximum number of descriptors per skb. 3399 */ 3400 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 3401 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 3402 3403 /* Allocate memory for TX descriptors */ 3404 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3405 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3406 &txq->descs_phys, GFP_KERNEL); 3407 if (!txq->descs) 3408 return -ENOMEM; 3409 3410 txq->last_desc = txq->size - 1; 3411 3412 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); 3413 if (!txq->buf) 3414 return -ENOMEM; 3415 3416 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 3417 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, 3418 txq->size * TSO_HEADER_SIZE, 3419 &txq->tso_hdrs_phys, GFP_KERNEL); 3420 if (!txq->tso_hdrs) 3421 return -ENOMEM; 3422 3423 /* Setup XPS mapping */ 3424 if (pp->neta_armada3700) 3425 cpu = 0; 3426 else if (txq_number > 1) 3427 cpu = txq->id % num_present_cpus(); 3428 else 3429 cpu = pp->rxq_def % num_present_cpus(); 3430 cpumask_set_cpu(cpu, &txq->affinity_mask); 3431 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 3432 3433 return 0; 3434 } 3435 3436 static void mvneta_txq_hw_init(struct mvneta_port *pp, 3437 struct mvneta_tx_queue *txq) 3438 { 3439 /* Set maximum bandwidth for enabled TXQs */ 3440 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 3441 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 3442 3443 /* Set Tx descriptors queue starting address */ 3444 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 3445 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 3446 3447 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 3448 } 3449 3450 /* Create and initialize a tx queue */ 3451 static int mvneta_txq_init(struct mvneta_port *pp, 3452 struct mvneta_tx_queue *txq) 3453 { 3454 int ret; 3455 3456 ret = mvneta_txq_sw_init(pp, txq); 3457 if (ret < 0) 3458 
return ret; 3459 3460 mvneta_txq_hw_init(pp, txq); 3461 3462 return 0; 3463 } 3464 3465 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 3466 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, 3467 struct mvneta_tx_queue *txq) 3468 { 3469 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3470 3471 kfree(txq->buf); 3472 3473 if (txq->tso_hdrs) 3474 dma_free_coherent(pp->dev->dev.parent, 3475 txq->size * TSO_HEADER_SIZE, 3476 txq->tso_hdrs, txq->tso_hdrs_phys); 3477 if (txq->descs) 3478 dma_free_coherent(pp->dev->dev.parent, 3479 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3480 txq->descs, txq->descs_phys); 3481 3482 netdev_tx_reset_queue(nq); 3483 3484 txq->descs = NULL; 3485 txq->last_desc = 0; 3486 txq->next_desc_to_proc = 0; 3487 txq->descs_phys = 0; 3488 } 3489 3490 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, 3491 struct mvneta_tx_queue *txq) 3492 { 3493 /* Set minimum bandwidth for disabled TXQs */ 3494 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 3495 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 3496 3497 /* Set Tx descriptors queue starting address and size */ 3498 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 3499 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 3500 } 3501 3502 static void mvneta_txq_deinit(struct mvneta_port *pp, 3503 struct mvneta_tx_queue *txq) 3504 { 3505 mvneta_txq_sw_deinit(pp, txq); 3506 mvneta_txq_hw_deinit(pp, txq); 3507 } 3508 3509 /* Cleanup all Tx queues */ 3510 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 3511 { 3512 int queue; 3513 3514 for (queue = 0; queue < txq_number; queue++) 3515 mvneta_txq_deinit(pp, &pp->txqs[queue]); 3516 } 3517 3518 /* Cleanup all Rx queues */ 3519 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 3520 { 3521 int queue; 3522 3523 for (queue = 0; queue < rxq_number; queue++) 3524 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3525 } 3526 3527 3528 /* Init all Rx queues */ 3529 static int mvneta_setup_rxqs(struct mvneta_port *pp) 3530 { 3531 int queue; 3532 3533 for (queue = 0; queue < rxq_number; queue++) { 3534 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 3535 3536 if (err) { 3537 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 3538 __func__, queue); 3539 mvneta_cleanup_rxqs(pp); 3540 return err; 3541 } 3542 } 3543 3544 return 0; 3545 } 3546 3547 /* Init all tx queues */ 3548 static int mvneta_setup_txqs(struct mvneta_port *pp) 3549 { 3550 int queue; 3551 3552 for (queue = 0; queue < txq_number; queue++) { 3553 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 3554 if (err) { 3555 netdev_err(pp->dev, "%s: can't create txq=%d\n", 3556 __func__, queue); 3557 mvneta_cleanup_txqs(pp); 3558 return err; 3559 } 3560 } 3561 3562 return 0; 3563 } 3564 3565 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) 3566 { 3567 int ret; 3568 3569 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); 3570 if (ret) 3571 return ret; 3572 3573 return phy_power_on(pp->comphy); 3574 } 3575 3576 static int mvneta_config_interface(struct mvneta_port *pp, 3577 phy_interface_t interface) 3578 { 3579 int ret = 0; 3580 3581 if (pp->comphy) { 3582 if (interface == PHY_INTERFACE_MODE_SGMII || 3583 interface == PHY_INTERFACE_MODE_1000BASEX || 3584 interface == PHY_INTERFACE_MODE_2500BASEX) { 3585 ret = mvneta_comphy_init(pp, interface); 3586 } 3587 } else { 3588 switch (interface) { 3589 case PHY_INTERFACE_MODE_QSGMII: 3590 mvreg_write(pp, MVNETA_SERDES_CFG, 3591 MVNETA_QSGMII_SERDES_PROTO); 3592 break; 3593 3594 
case PHY_INTERFACE_MODE_SGMII: 3595 case PHY_INTERFACE_MODE_1000BASEX: 3596 mvreg_write(pp, MVNETA_SERDES_CFG, 3597 MVNETA_SGMII_SERDES_PROTO); 3598 break; 3599 3600 case PHY_INTERFACE_MODE_2500BASEX: 3601 mvreg_write(pp, MVNETA_SERDES_CFG, 3602 MVNETA_HSGMII_SERDES_PROTO); 3603 break; 3604 default: 3605 break; 3606 } 3607 } 3608 3609 pp->phy_interface = interface; 3610 3611 return ret; 3612 } 3613 3614 static void mvneta_start_dev(struct mvneta_port *pp) 3615 { 3616 int cpu; 3617 3618 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); 3619 3620 mvneta_max_rx_size_set(pp, pp->pkt_size); 3621 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 3622 3623 /* start the Rx/Tx activity */ 3624 mvneta_port_enable(pp); 3625 3626 if (!pp->neta_armada3700) { 3627 /* Enable polling on the port */ 3628 for_each_online_cpu(cpu) { 3629 struct mvneta_pcpu_port *port = 3630 per_cpu_ptr(pp->ports, cpu); 3631 3632 napi_enable(&port->napi); 3633 } 3634 } else { 3635 napi_enable(&pp->napi); 3636 } 3637 3638 /* Unmask interrupts. It has to be done from each CPU */ 3639 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3640 3641 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3642 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3643 MVNETA_CAUSE_LINK_CHANGE); 3644 3645 phylink_start(pp->phylink); 3646 3647 /* We may have called phylink_speed_down before */ 3648 phylink_speed_up(pp->phylink); 3649 3650 netif_tx_start_all_queues(pp->dev); 3651 3652 clear_bit(__MVNETA_DOWN, &pp->state); 3653 } 3654 3655 static void mvneta_stop_dev(struct mvneta_port *pp) 3656 { 3657 unsigned int cpu; 3658 3659 set_bit(__MVNETA_DOWN, &pp->state); 3660 3661 if (device_may_wakeup(&pp->dev->dev)) 3662 phylink_speed_down(pp->phylink, false); 3663 3664 phylink_stop(pp->phylink); 3665 3666 if (!pp->neta_armada3700) { 3667 for_each_online_cpu(cpu) { 3668 struct mvneta_pcpu_port *port = 3669 per_cpu_ptr(pp->ports, cpu); 3670 3671 napi_disable(&port->napi); 3672 } 3673 } else { 3674 napi_disable(&pp->napi); 3675 } 3676 3677 netif_carrier_off(pp->dev); 3678 3679 mvneta_port_down(pp); 3680 netif_tx_stop_all_queues(pp->dev); 3681 3682 /* Stop the port activity */ 3683 mvneta_port_disable(pp); 3684 3685 /* Clear all ethernet port interrupts */ 3686 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 3687 3688 /* Mask all ethernet port interrupts */ 3689 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3690 3691 mvneta_tx_reset(pp); 3692 mvneta_rx_reset(pp); 3693 3694 WARN_ON(phy_power_off(pp->comphy)); 3695 } 3696 3697 static void mvneta_percpu_enable(void *arg) 3698 { 3699 struct mvneta_port *pp = arg; 3700 3701 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 3702 } 3703 3704 static void mvneta_percpu_disable(void *arg) 3705 { 3706 struct mvneta_port *pp = arg; 3707 3708 disable_percpu_irq(pp->dev->irq); 3709 } 3710 3711 /* Change the device mtu */ 3712 static int mvneta_change_mtu(struct net_device *dev, int mtu) 3713 { 3714 struct mvneta_port *pp = netdev_priv(dev); 3715 int ret; 3716 3717 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 3718 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 3719 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 3720 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 3721 } 3722 3723 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { 3724 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); 3725 return -EINVAL; 3726 } 3727 3728 dev->mtu = mtu; 3729 3730 if (!netif_running(dev)) { 3731 if (pp->bm_priv) 3732 mvneta_bm_update_mtu(pp, mtu); 3733 3734 netdev_update_features(dev); 3735 return 0; 3736 } 3737 3738 /* The 
interface is running, so we have to force a 3739 * reallocation of the queues 3740 */ 3741 mvneta_stop_dev(pp); 3742 on_each_cpu(mvneta_percpu_disable, pp, true); 3743 3744 mvneta_cleanup_txqs(pp); 3745 mvneta_cleanup_rxqs(pp); 3746 3747 if (pp->bm_priv) 3748 mvneta_bm_update_mtu(pp, mtu); 3749 3750 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 3751 3752 ret = mvneta_setup_rxqs(pp); 3753 if (ret) { 3754 netdev_err(dev, "unable to setup rxqs after MTU change\n"); 3755 return ret; 3756 } 3757 3758 ret = mvneta_setup_txqs(pp); 3759 if (ret) { 3760 netdev_err(dev, "unable to setup txqs after MTU change\n"); 3761 return ret; 3762 } 3763 3764 on_each_cpu(mvneta_percpu_enable, pp, true); 3765 mvneta_start_dev(pp); 3766 3767 netdev_update_features(dev); 3768 3769 return 0; 3770 } 3771 3772 static netdev_features_t mvneta_fix_features(struct net_device *dev, 3773 netdev_features_t features) 3774 { 3775 struct mvneta_port *pp = netdev_priv(dev); 3776 3777 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 3778 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 3779 netdev_info(dev, 3780 "Disable IP checksum for MTU greater than %dB\n", 3781 pp->tx_csum_limit); 3782 } 3783 3784 return features; 3785 } 3786 3787 /* Get mac address */ 3788 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 3789 { 3790 u32 mac_addr_l, mac_addr_h; 3791 3792 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 3793 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 3794 addr[0] = (mac_addr_h >> 24) & 0xFF; 3795 addr[1] = (mac_addr_h >> 16) & 0xFF; 3796 addr[2] = (mac_addr_h >> 8) & 0xFF; 3797 addr[3] = mac_addr_h & 0xFF; 3798 addr[4] = (mac_addr_l >> 8) & 0xFF; 3799 addr[5] = mac_addr_l & 0xFF; 3800 } 3801 3802 /* Handle setting mac address */ 3803 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 3804 { 3805 struct mvneta_port *pp = netdev_priv(dev); 3806 struct sockaddr *sockaddr = addr; 3807 int ret; 3808 3809 ret = eth_prepare_mac_addr_change(dev, addr); 3810 if (ret < 0) 3811 return ret; 3812 /* Remove previous address table entry */ 3813 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 3814 3815 /* Set new addr in hw */ 3816 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 3817 3818 eth_commit_mac_addr_change(dev, addr); 3819 return 0; 3820 } 3821 3822 static void mvneta_validate(struct phylink_config *config, 3823 unsigned long *supported, 3824 struct phylink_link_state *state) 3825 { 3826 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 3827 3828 /* We only support QSGMII, SGMII, 802.3z and RGMII modes. 3829 * When in 802.3z mode, we must have AN enabled: 3830 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 3831 * When <PortType> = 1 (1000BASE-X) this field must be set to 1." 
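 * This is why the 802.3z interface modes are rejected below whenever Autoneg is not set in the advertising mask.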
3832 */ 3833 if (phy_interface_mode_is_8023z(state->interface) && 3834 !phylink_test(state->advertising, Autoneg)) { 3835 linkmode_zero(supported); 3836 return; 3837 } 3838 3839 /* Allow all the expected bits */ 3840 phylink_set(mask, Autoneg); 3841 phylink_set_port_modes(mask); 3842 3843 /* Asymmetric pause is unsupported */ 3844 phylink_set(mask, Pause); 3845 3846 /* Half-duplex at speeds higher than 100Mbit is unsupported */ 3847 if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { 3848 phylink_set(mask, 1000baseT_Full); 3849 phylink_set(mask, 1000baseX_Full); 3850 } 3851 3852 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { 3853 phylink_set(mask, 2500baseT_Full); 3854 phylink_set(mask, 2500baseX_Full); 3855 } 3856 3857 if (!phy_interface_mode_is_8023z(state->interface)) { 3858 /* 10M and 100M are only supported in non-802.3z mode */ 3859 phylink_set(mask, 10baseT_Half); 3860 phylink_set(mask, 10baseT_Full); 3861 phylink_set(mask, 100baseT_Half); 3862 phylink_set(mask, 100baseT_Full); 3863 } 3864 3865 linkmode_and(supported, supported, mask); 3866 linkmode_and(state->advertising, state->advertising, mask); 3867 } 3868 3869 static void mvneta_mac_pcs_get_state(struct phylink_config *config, 3870 struct phylink_link_state *state) 3871 { 3872 struct net_device *ndev = to_net_dev(config->dev); 3873 struct mvneta_port *pp = netdev_priv(ndev); 3874 u32 gmac_stat; 3875 3876 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3877 3878 if (gmac_stat & MVNETA_GMAC_SPEED_1000) 3879 state->speed = 3880 state->interface == PHY_INTERFACE_MODE_2500BASEX ? 3881 SPEED_2500 : SPEED_1000; 3882 else if (gmac_stat & MVNETA_GMAC_SPEED_100) 3883 state->speed = SPEED_100; 3884 else 3885 state->speed = SPEED_10; 3886 3887 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); 3888 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 3889 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 3890 3891 state->pause = 0; 3892 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) 3893 state->pause |= MLO_PAUSE_RX; 3894 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) 3895 state->pause |= MLO_PAUSE_TX; 3896 } 3897 3898 static void mvneta_mac_an_restart(struct phylink_config *config) 3899 { 3900 struct net_device *ndev = to_net_dev(config->dev); 3901 struct mvneta_port *pp = netdev_priv(ndev); 3902 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3903 3904 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3905 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); 3906 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3907 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); 3908 } 3909 3910 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, 3911 const struct phylink_link_state *state) 3912 { 3913 struct net_device *ndev = to_net_dev(config->dev); 3914 struct mvneta_port *pp = netdev_priv(ndev); 3915 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 3916 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 3917 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); 3918 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 3919 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3920 3921 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; 3922 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | 3923 MVNETA_GMAC2_PORT_RESET); 3924 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); 3925 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; 3926 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | 3927 
MVNETA_GMAC_INBAND_RESTART_AN | 3928 MVNETA_GMAC_AN_SPEED_EN | 3929 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | 3930 MVNETA_GMAC_AN_FLOW_CTRL_EN | 3931 MVNETA_GMAC_AN_DUPLEX_EN); 3932 3933 /* Even though it might look weird, when we're configured in 3934 * SGMII or QSGMII mode, the RGMII bit needs to be set. 3935 */ 3936 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; 3937 3938 if (state->interface == PHY_INTERFACE_MODE_QSGMII || 3939 state->interface == PHY_INTERFACE_MODE_SGMII || 3940 phy_interface_mode_is_8023z(state->interface)) 3941 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; 3942 3943 if (phylink_test(state->advertising, Pause)) 3944 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 3945 3946 if (!phylink_autoneg_inband(mode)) { 3947 /* Phy or fixed speed - nothing to do, leave the 3948 * configured speed, duplex and flow control as-is. 3949 */ 3950 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 3951 /* SGMII mode receives the state from the PHY */ 3952 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; 3953 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; 3954 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | 3955 MVNETA_GMAC_FORCE_LINK_PASS | 3956 MVNETA_GMAC_CONFIG_MII_SPEED | 3957 MVNETA_GMAC_CONFIG_GMII_SPEED | 3958 MVNETA_GMAC_CONFIG_FULL_DUPLEX)) | 3959 MVNETA_GMAC_INBAND_AN_ENABLE | 3960 MVNETA_GMAC_AN_SPEED_EN | 3961 MVNETA_GMAC_AN_DUPLEX_EN; 3962 } else { 3963 /* 802.3z negotiation - only 1000base-X */ 3964 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; 3965 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; 3966 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | 3967 MVNETA_GMAC_FORCE_LINK_PASS | 3968 MVNETA_GMAC_CONFIG_MII_SPEED)) | 3969 MVNETA_GMAC_INBAND_AN_ENABLE | 3970 MVNETA_GMAC_CONFIG_GMII_SPEED | 3971 /* The MAC only supports FD mode */ 3972 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 3973 3974 if (state->pause & MLO_PAUSE_AN && state->an_enabled) 3975 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; 3976 } 3977 3978 /* Armada 370 documentation says we can only change the port mode 3979 * and in-band enable when the link is down, so force it down 3980 * while making these changes. We also do this for GMAC_CTRL2 3981 */ 3982 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || 3983 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || 3984 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { 3985 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3986 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | 3987 MVNETA_GMAC_FORCE_LINK_DOWN); 3988 } 3989 3990 3991 /* When at 2.5G, the link partner can send frames with shortened 3992 * preambles. 
3993 */ 3994 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 3995 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 3996 3997 if (pp->phy_interface != state->interface) { 3998 if (pp->comphy) 3999 WARN_ON(phy_power_off(pp->comphy)); 4000 WARN_ON(mvneta_config_interface(pp, state->interface)); 4001 } 4002 4003 if (new_ctrl0 != gmac_ctrl0) 4004 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); 4005 if (new_ctrl2 != gmac_ctrl2) 4006 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); 4007 if (new_ctrl4 != gmac_ctrl4) 4008 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); 4009 if (new_clk != gmac_clk) 4010 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); 4011 if (new_an != gmac_an) 4012 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); 4013 4014 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { 4015 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 4016 MVNETA_GMAC2_PORT_RESET) != 0) 4017 continue; 4018 } 4019 } 4020 4021 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) 4022 { 4023 u32 lpi_ctl1; 4024 4025 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 4026 if (enable) 4027 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; 4028 else 4029 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; 4030 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); 4031 } 4032 4033 static void mvneta_mac_link_down(struct phylink_config *config, 4034 unsigned int mode, phy_interface_t interface) 4035 { 4036 struct net_device *ndev = to_net_dev(config->dev); 4037 struct mvneta_port *pp = netdev_priv(ndev); 4038 u32 val; 4039 4040 mvneta_port_down(pp); 4041 4042 if (!phylink_autoneg_inband(mode)) { 4043 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4044 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4045 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4046 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4047 } 4048 4049 pp->eee_active = false; 4050 mvneta_set_eee(pp, false); 4051 } 4052 4053 static void mvneta_mac_link_up(struct phylink_config *config, 4054 struct phy_device *phy, 4055 unsigned int mode, phy_interface_t interface, 4056 int speed, int duplex, 4057 bool tx_pause, bool rx_pause) 4058 { 4059 struct net_device *ndev = to_net_dev(config->dev); 4060 struct mvneta_port *pp = netdev_priv(ndev); 4061 u32 val; 4062 4063 if (!phylink_autoneg_inband(mode)) { 4064 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4065 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | 4066 MVNETA_GMAC_CONFIG_MII_SPEED | 4067 MVNETA_GMAC_CONFIG_GMII_SPEED | 4068 MVNETA_GMAC_CONFIG_FLOW_CTRL | 4069 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 4070 val |= MVNETA_GMAC_FORCE_LINK_PASS; 4071 4072 if (speed == SPEED_1000 || speed == SPEED_2500) 4073 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 4074 else if (speed == SPEED_100) 4075 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 4076 4077 if (duplex == DUPLEX_FULL) 4078 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4079 4080 if (tx_pause || rx_pause) 4081 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4082 4083 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4084 } else { 4085 /* When inband doesn't cover flow control or flow control is 4086 * disabled, we need to manually configure it. This bit will 4087 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 
4088 */ 4089 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4090 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; 4091 4092 if (tx_pause || rx_pause) 4093 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4094 4095 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4096 } 4097 4098 mvneta_port_up(pp); 4099 4100 if (phy && pp->eee_enabled) { 4101 pp->eee_active = phy_init_eee(phy, 0) >= 0; 4102 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); 4103 } 4104 } 4105 4106 static const struct phylink_mac_ops mvneta_phylink_ops = { 4107 .validate = mvneta_validate, 4108 .mac_pcs_get_state = mvneta_mac_pcs_get_state, 4109 .mac_an_restart = mvneta_mac_an_restart, 4110 .mac_config = mvneta_mac_config, 4111 .mac_link_down = mvneta_mac_link_down, 4112 .mac_link_up = mvneta_mac_link_up, 4113 }; 4114 4115 static int mvneta_mdio_probe(struct mvneta_port *pp) 4116 { 4117 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 4118 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); 4119 4120 if (err) 4121 netdev_err(pp->dev, "could not attach PHY: %d\n", err); 4122 4123 phylink_ethtool_get_wol(pp->phylink, &wol); 4124 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); 4125 4126 /* PHY WoL may be enabled but device wakeup disabled */ 4127 if (wol.supported) 4128 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); 4129 4130 return err; 4131 } 4132 4133 static void mvneta_mdio_remove(struct mvneta_port *pp) 4134 { 4135 phylink_disconnect_phy(pp->phylink); 4136 } 4137 4138 /* Electing a CPU must be done in an atomic way: it should be done 4139 * after or before the removal/insertion of a CPU and this function is 4140 * not reentrant. 4141 */ 4142 static void mvneta_percpu_elect(struct mvneta_port *pp) 4143 { 4144 int elected_cpu = 0, max_cpu, cpu, i = 0; 4145 4146 /* Use the cpu associated to the rxq when it is online, in all 4147 * the other cases, use the cpu 0 which can't be offline. 4148 */ 4149 if (cpu_online(pp->rxq_def)) 4150 elected_cpu = pp->rxq_def; 4151 4152 max_cpu = num_present_cpus(); 4153 4154 for_each_online_cpu(cpu) { 4155 int rxq_map = 0, txq_map = 0; 4156 int rxq; 4157 4158 for (rxq = 0; rxq < rxq_number; rxq++) 4159 if ((rxq % max_cpu) == cpu) 4160 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 4161 4162 if (cpu == elected_cpu) 4163 /* Map the default receive queue to the elected CPU */ 4164 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 4165 4166 /* We update the TX queue map only if we have one 4167 * queue. In this case we associate the TX queue to 4168 * the CPU bound to the default RX queue 4169 */ 4170 if (txq_number == 1) 4171 txq_map = (cpu == elected_cpu) ? 
4172 MVNETA_CPU_TXQ_ACCESS(1) : 0; 4173 else 4174 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 4175 MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 4176 4177 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 4178 4179 /* Update the interrupt mask on each CPU according to the 4180 * new mapping 4181 */ 4182 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 4183 pp, true); 4184 i++; 4185 4186 } 4187 } 4188 4189 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) 4190 { 4191 int other_cpu; 4192 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4193 node_online); 4194 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4195 4196 /* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts 4197 * are routed to CPU 0, so we don't need all the cpu-hotplug support 4198 */ 4199 if (pp->neta_armada3700) 4200 return 0; 4201 4202 spin_lock(&pp->lock); 4203 /* 4204 * Configuring the driver for a new CPU while the driver is 4205 * stopping is racy, so just avoid it. 4206 */ 4207 if (pp->is_stopped) { 4208 spin_unlock(&pp->lock); 4209 return 0; 4210 } 4211 netif_tx_stop_all_queues(pp->dev); 4212 4213 /* 4214 * We have to synchronise on the napi of each CPU except the one 4215 * just being woken up 4216 */ 4217 for_each_online_cpu(other_cpu) { 4218 if (other_cpu != cpu) { 4219 struct mvneta_pcpu_port *other_port = 4220 per_cpu_ptr(pp->ports, other_cpu); 4221 4222 napi_synchronize(&other_port->napi); 4223 } 4224 } 4225 4226 /* Mask all ethernet port interrupts */ 4227 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4228 napi_enable(&port->napi); 4229 4230 /* 4231 * Enable per-CPU interrupts on the CPU that is 4232 * brought up. 4233 */ 4234 mvneta_percpu_enable(pp); 4235 4236 /* 4237 * Elect the CPU that will handle the default RX queue, 4238 * now that a new CPU is online. 4239 */ 4240 mvneta_percpu_elect(pp); 4241 4242 /* Unmask all ethernet port interrupts */ 4243 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4244 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4245 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4246 MVNETA_CAUSE_LINK_CHANGE); 4247 netif_tx_start_all_queues(pp->dev); 4248 spin_unlock(&pp->lock); 4249 return 0; 4250 } 4251 4252 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) 4253 { 4254 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4255 node_online); 4256 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4257 4258 /* 4259 * Thanks to this lock we are sure that any pending cpu election is 4260 * done. 4261 */ 4262 spin_lock(&pp->lock); 4263 /* Mask all ethernet port interrupts */ 4264 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4265 spin_unlock(&pp->lock); 4266 4267 napi_synchronize(&port->napi); 4268 napi_disable(&port->napi); 4269 /* Disable per-CPU interrupts on the CPU that is brought down.
*/ 4270 mvneta_percpu_disable(pp); 4271 return 0; 4272 } 4273 4274 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) 4275 { 4276 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4277 node_dead); 4278 4279 /* Check if a new CPU must be elected now that this one is down */ 4280 spin_lock(&pp->lock); 4281 mvneta_percpu_elect(pp); 4282 spin_unlock(&pp->lock); 4283 /* Unmask all ethernet port interrupts */ 4284 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4285 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4286 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4287 MVNETA_CAUSE_LINK_CHANGE); 4288 netif_tx_start_all_queues(pp->dev); 4289 return 0; 4290 } 4291 4292 static int mvneta_open(struct net_device *dev) 4293 { 4294 struct mvneta_port *pp = netdev_priv(dev); 4295 int ret; 4296 4297 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 4298 4299 ret = mvneta_setup_rxqs(pp); 4300 if (ret) 4301 return ret; 4302 4303 ret = mvneta_setup_txqs(pp); 4304 if (ret) 4305 goto err_cleanup_rxqs; 4306 4307 /* Connect to port interrupt line */ 4308 if (pp->neta_armada3700) 4309 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 4310 dev->name, pp); 4311 else 4312 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, 4313 dev->name, pp->ports); 4314 if (ret) { 4315 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 4316 goto err_cleanup_txqs; 4317 } 4318 4319 if (!pp->neta_armada3700) { 4320 /* Enable per-CPU interrupts on all CPUs to handle our RX 4321 * queue interrupts 4322 */ 4323 on_each_cpu(mvneta_percpu_enable, pp, true); 4324 4325 pp->is_stopped = false; 4326 /* Register a CPU notifier to handle the case where our CPU 4327 * might be taken offline. 4328 */ 4329 ret = cpuhp_state_add_instance_nocalls(online_hpstate, 4330 &pp->node_online); 4331 if (ret) 4332 goto err_free_irq; 4333 4334 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4335 &pp->node_dead); 4336 if (ret) 4337 goto err_free_online_hp; 4338 } 4339 4340 ret = mvneta_mdio_probe(pp); 4341 if (ret < 0) { 4342 netdev_err(dev, "cannot probe MDIO bus\n"); 4343 goto err_free_dead_hp; 4344 } 4345 4346 mvneta_start_dev(pp); 4347 4348 return 0; 4349 4350 err_free_dead_hp: 4351 if (!pp->neta_armada3700) 4352 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4353 &pp->node_dead); 4354 err_free_online_hp: 4355 if (!pp->neta_armada3700) 4356 cpuhp_state_remove_instance_nocalls(online_hpstate, 4357 &pp->node_online); 4358 err_free_irq: 4359 if (pp->neta_armada3700) { 4360 free_irq(pp->dev->irq, pp); 4361 } else { 4362 on_each_cpu(mvneta_percpu_disable, pp, true); 4363 free_percpu_irq(pp->dev->irq, pp->ports); 4364 } 4365 err_cleanup_txqs: 4366 mvneta_cleanup_txqs(pp); 4367 err_cleanup_rxqs: 4368 mvneta_cleanup_rxqs(pp); 4369 return ret; 4370 } 4371 4372 /* Stop the port, free port interrupt line */ 4373 static int mvneta_stop(struct net_device *dev) 4374 { 4375 struct mvneta_port *pp = netdev_priv(dev); 4376 4377 if (!pp->neta_armada3700) { 4378 /* Inform that we are stopping, so that we don't set up the 4379 * driver for new CPUs in the notifiers. The code of the 4380 * notifier for CPU online is protected by the same spinlock, 4381 * so when we get the lock, the notifier work is done.
4382 */ 4383 spin_lock(&pp->lock); 4384 pp->is_stopped = true; 4385 spin_unlock(&pp->lock); 4386 4387 mvneta_stop_dev(pp); 4388 mvneta_mdio_remove(pp); 4389 4390 cpuhp_state_remove_instance_nocalls(online_hpstate, 4391 &pp->node_online); 4392 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4393 &pp->node_dead); 4394 on_each_cpu(mvneta_percpu_disable, pp, true); 4395 free_percpu_irq(dev->irq, pp->ports); 4396 } else { 4397 mvneta_stop_dev(pp); 4398 mvneta_mdio_remove(pp); 4399 free_irq(dev->irq, pp); 4400 } 4401 4402 mvneta_cleanup_rxqs(pp); 4403 mvneta_cleanup_txqs(pp); 4404 4405 return 0; 4406 } 4407 4408 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4409 { 4410 struct mvneta_port *pp = netdev_priv(dev); 4411 4412 return phylink_mii_ioctl(pp->phylink, ifr, cmd); 4413 } 4414 4415 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, 4416 struct netlink_ext_ack *extack) 4417 { 4418 bool need_update, running = netif_running(dev); 4419 struct mvneta_port *pp = netdev_priv(dev); 4420 struct bpf_prog *old_prog; 4421 4422 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4423 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); 4424 return -EOPNOTSUPP; 4425 } 4426 4427 if (pp->bm_priv) { 4428 NL_SET_ERR_MSG_MOD(extack, 4429 "Hardware Buffer Management not supported on XDP"); 4430 return -EOPNOTSUPP; 4431 } 4432 4433 need_update = !!pp->xdp_prog != !!prog; 4434 if (running && need_update) 4435 mvneta_stop(dev); 4436 4437 old_prog = xchg(&pp->xdp_prog, prog); 4438 if (old_prog) 4439 bpf_prog_put(old_prog); 4440 4441 if (running && need_update) 4442 return mvneta_open(dev); 4443 4444 return 0; 4445 } 4446 4447 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) 4448 { 4449 switch (xdp->command) { 4450 case XDP_SETUP_PROG: 4451 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); 4452 default: 4453 return -EINVAL; 4454 } 4455 } 4456 4457 /* Ethtool methods */ 4458 4459 /* Set link ksettings (phy address, speed) for ethtools */ 4460 static int 4461 mvneta_ethtool_set_link_ksettings(struct net_device *ndev, 4462 const struct ethtool_link_ksettings *cmd) 4463 { 4464 struct mvneta_port *pp = netdev_priv(ndev); 4465 4466 return phylink_ethtool_ksettings_set(pp->phylink, cmd); 4467 } 4468 4469 /* Get link ksettings for ethtools */ 4470 static int 4471 mvneta_ethtool_get_link_ksettings(struct net_device *ndev, 4472 struct ethtool_link_ksettings *cmd) 4473 { 4474 struct mvneta_port *pp = netdev_priv(ndev); 4475 4476 return phylink_ethtool_ksettings_get(pp->phylink, cmd); 4477 } 4478 4479 static int mvneta_ethtool_nway_reset(struct net_device *dev) 4480 { 4481 struct mvneta_port *pp = netdev_priv(dev); 4482 4483 return phylink_ethtool_nway_reset(pp->phylink); 4484 } 4485 4486 /* Set interrupt coalescing for ethtools */ 4487 static int 4488 mvneta_ethtool_set_coalesce(struct net_device *dev, 4489 struct ethtool_coalesce *c, 4490 struct kernel_ethtool_coalesce *kernel_coal, 4491 struct netlink_ext_ack *extack) 4492 { 4493 struct mvneta_port *pp = netdev_priv(dev); 4494 int queue; 4495 4496 for (queue = 0; queue < rxq_number; queue++) { 4497 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4498 rxq->time_coal = c->rx_coalesce_usecs; 4499 rxq->pkts_coal = c->rx_max_coalesced_frames; 4500 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 4501 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 4502 } 4503 4504 for (queue = 0; queue < txq_number; queue++) { 4505 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 4506 txq->done_pkts_coal = 
c->tx_max_coalesced_frames; 4507 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 4508 } 4509 4510 return 0; 4511 } 4512 4513 /* get coalescing for ethtools */ 4514 static int 4515 mvneta_ethtool_get_coalesce(struct net_device *dev, 4516 struct ethtool_coalesce *c, 4517 struct kernel_ethtool_coalesce *kernel_coal, 4518 struct netlink_ext_ack *extack) 4519 { 4520 struct mvneta_port *pp = netdev_priv(dev); 4521 4522 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 4523 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 4524 4525 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 4526 return 0; 4527 } 4528 4529 4530 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 4531 struct ethtool_drvinfo *drvinfo) 4532 { 4533 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 4534 sizeof(drvinfo->driver)); 4535 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 4536 sizeof(drvinfo->version)); 4537 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 4538 sizeof(drvinfo->bus_info)); 4539 } 4540 4541 4542 static void mvneta_ethtool_get_ringparam(struct net_device *netdev, 4543 struct ethtool_ringparam *ring) 4544 { 4545 struct mvneta_port *pp = netdev_priv(netdev); 4546 4547 ring->rx_max_pending = MVNETA_MAX_RXD; 4548 ring->tx_max_pending = MVNETA_MAX_TXD; 4549 ring->rx_pending = pp->rx_ring_size; 4550 ring->tx_pending = pp->tx_ring_size; 4551 } 4552 4553 static int mvneta_ethtool_set_ringparam(struct net_device *dev, 4554 struct ethtool_ringparam *ring) 4555 { 4556 struct mvneta_port *pp = netdev_priv(dev); 4557 4558 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 4559 return -EINVAL; 4560 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 4561 ring->rx_pending : MVNETA_MAX_RXD; 4562 4563 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 4564 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 4565 if (pp->tx_ring_size != ring->tx_pending) 4566 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 4567 pp->tx_ring_size, ring->tx_pending); 4568 4569 if (netif_running(dev)) { 4570 mvneta_stop(dev); 4571 if (mvneta_open(dev)) { 4572 netdev_err(dev, 4573 "error on opening device after ring param change\n"); 4574 return -ENOMEM; 4575 } 4576 } 4577 4578 return 0; 4579 } 4580 4581 static void mvneta_ethtool_get_pauseparam(struct net_device *dev, 4582 struct ethtool_pauseparam *pause) 4583 { 4584 struct mvneta_port *pp = netdev_priv(dev); 4585 4586 phylink_ethtool_get_pauseparam(pp->phylink, pause); 4587 } 4588 4589 static int mvneta_ethtool_set_pauseparam(struct net_device *dev, 4590 struct ethtool_pauseparam *pause) 4591 { 4592 struct mvneta_port *pp = netdev_priv(dev); 4593 4594 return phylink_ethtool_set_pauseparam(pp->phylink, pause); 4595 } 4596 4597 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 4598 u8 *data) 4599 { 4600 if (sset == ETH_SS_STATS) { 4601 int i; 4602 4603 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4604 memcpy(data + i * ETH_GSTRING_LEN, 4605 mvneta_statistics[i].name, ETH_GSTRING_LEN); 4606 } 4607 } 4608 4609 static void 4610 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, 4611 struct mvneta_ethtool_stats *es) 4612 { 4613 unsigned int start; 4614 int cpu; 4615 4616 for_each_possible_cpu(cpu) { 4617 struct mvneta_pcpu_stats *stats; 4618 u64 skb_alloc_error; 4619 u64 refill_error; 4620 u64 xdp_redirect; 4621 u64 xdp_xmit_err; 4622 u64 xdp_tx_err; 4623 u64 xdp_pass; 4624 u64 xdp_drop; 4625 u64 xdp_xmit; 4626 u64 xdp_tx; 4627 4628 stats = per_cpu_ptr(pp->stats, cpu); 4629 do { 4630 start = 
u64_stats_fetch_begin_irq(&stats->syncp); 4631 skb_alloc_error = stats->es.skb_alloc_error; 4632 refill_error = stats->es.refill_error; 4633 xdp_redirect = stats->es.ps.xdp_redirect; 4634 xdp_pass = stats->es.ps.xdp_pass; 4635 xdp_drop = stats->es.ps.xdp_drop; 4636 xdp_xmit = stats->es.ps.xdp_xmit; 4637 xdp_xmit_err = stats->es.ps.xdp_xmit_err; 4638 xdp_tx = stats->es.ps.xdp_tx; 4639 xdp_tx_err = stats->es.ps.xdp_tx_err; 4640 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 4641 4642 es->skb_alloc_error += skb_alloc_error; 4643 es->refill_error += refill_error; 4644 es->ps.xdp_redirect += xdp_redirect; 4645 es->ps.xdp_pass += xdp_pass; 4646 es->ps.xdp_drop += xdp_drop; 4647 es->ps.xdp_xmit += xdp_xmit; 4648 es->ps.xdp_xmit_err += xdp_xmit_err; 4649 es->ps.xdp_tx += xdp_tx; 4650 es->ps.xdp_tx_err += xdp_tx_err; 4651 } 4652 } 4653 4654 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 4655 { 4656 struct mvneta_ethtool_stats stats = {}; 4657 const struct mvneta_statistic *s; 4658 void __iomem *base = pp->base; 4659 u32 high, low; 4660 u64 val; 4661 int i; 4662 4663 mvneta_ethtool_update_pcpu_stats(pp, &stats); 4664 for (i = 0, s = mvneta_statistics; 4665 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 4666 s++, i++) { 4667 switch (s->type) { 4668 case T_REG_32: 4669 val = readl_relaxed(base + s->offset); 4670 pp->ethtool_stats[i] += val; 4671 break; 4672 case T_REG_64: 4673 /* Docs say to read low 32-bit then high */ 4674 low = readl_relaxed(base + s->offset); 4675 high = readl_relaxed(base + s->offset + 4); 4676 val = (u64)high << 32 | low; 4677 pp->ethtool_stats[i] += val; 4678 break; 4679 case T_SW: 4680 switch (s->offset) { 4681 case ETHTOOL_STAT_EEE_WAKEUP: 4682 val = phylink_get_eee_err(pp->phylink); 4683 pp->ethtool_stats[i] += val; 4684 break; 4685 case ETHTOOL_STAT_SKB_ALLOC_ERR: 4686 pp->ethtool_stats[i] = stats.skb_alloc_error; 4687 break; 4688 case ETHTOOL_STAT_REFILL_ERR: 4689 pp->ethtool_stats[i] = stats.refill_error; 4690 break; 4691 case ETHTOOL_XDP_REDIRECT: 4692 pp->ethtool_stats[i] = stats.ps.xdp_redirect; 4693 break; 4694 case ETHTOOL_XDP_PASS: 4695 pp->ethtool_stats[i] = stats.ps.xdp_pass; 4696 break; 4697 case ETHTOOL_XDP_DROP: 4698 pp->ethtool_stats[i] = stats.ps.xdp_drop; 4699 break; 4700 case ETHTOOL_XDP_TX: 4701 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4702 break; 4703 case ETHTOOL_XDP_TX_ERR: 4704 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 4705 break; 4706 case ETHTOOL_XDP_XMIT: 4707 pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4708 break; 4709 case ETHTOOL_XDP_XMIT_ERR: 4710 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 4711 break; 4712 } 4713 break; 4714 } 4715 } 4716 } 4717 4718 static void mvneta_ethtool_get_stats(struct net_device *dev, 4719 struct ethtool_stats *stats, u64 *data) 4720 { 4721 struct mvneta_port *pp = netdev_priv(dev); 4722 int i; 4723 4724 mvneta_ethtool_update_stats(pp); 4725 4726 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4727 *data++ = pp->ethtool_stats[i]; 4728 } 4729 4730 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) 4731 { 4732 if (sset == ETH_SS_STATS) 4733 return ARRAY_SIZE(mvneta_statistics); 4734 return -EOPNOTSUPP; 4735 } 4736 4737 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) 4738 { 4739 return MVNETA_RSS_LU_TABLE_SIZE; 4740 } 4741 4742 static int mvneta_ethtool_get_rxnfc(struct net_device *dev, 4743 struct ethtool_rxnfc *info, 4744 u32 *rules __always_unused) 4745 { 4746 switch (info->cmd) { 4747 case ETHTOOL_GRXRINGS: 4748 info->data = 
rxq_number; 4749 return 0; 4750 case ETHTOOL_GRXFH: 4751 return -EOPNOTSUPP; 4752 default: 4753 return -EOPNOTSUPP; 4754 } 4755 } 4756 4757 static int mvneta_config_rss(struct mvneta_port *pp) 4758 { 4759 int cpu; 4760 u32 val; 4761 4762 netif_tx_stop_all_queues(pp->dev); 4763 4764 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4765 4766 if (!pp->neta_armada3700) { 4767 /* We have to synchronise on the napi of each CPU */ 4768 for_each_online_cpu(cpu) { 4769 struct mvneta_pcpu_port *pcpu_port = 4770 per_cpu_ptr(pp->ports, cpu); 4771 4772 napi_synchronize(&pcpu_port->napi); 4773 napi_disable(&pcpu_port->napi); 4774 } 4775 } else { 4776 napi_synchronize(&pp->napi); 4777 napi_disable(&pp->napi); 4778 } 4779 4780 pp->rxq_def = pp->indir[0]; 4781 4782 /* Update unicast mapping */ 4783 mvneta_set_rx_mode(pp->dev); 4784 4785 /* Update val of portCfg register accordingly with all RxQueue types */ 4786 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 4787 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 4788 4789 /* Update the elected CPU matching the new rxq_def */ 4790 spin_lock(&pp->lock); 4791 mvneta_percpu_elect(pp); 4792 spin_unlock(&pp->lock); 4793 4794 if (!pp->neta_armada3700) { 4795 /* We have to synchronise on the napi of each CPU */ 4796 for_each_online_cpu(cpu) { 4797 struct mvneta_pcpu_port *pcpu_port = 4798 per_cpu_ptr(pp->ports, cpu); 4799 4800 napi_enable(&pcpu_port->napi); 4801 } 4802 } else { 4803 napi_enable(&pp->napi); 4804 } 4805 4806 netif_tx_start_all_queues(pp->dev); 4807 4808 return 0; 4809 } 4810 4811 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 4812 const u8 *key, const u8 hfunc) 4813 { 4814 struct mvneta_port *pp = netdev_priv(dev); 4815 4816 /* Current code for Armada 3700 doesn't support RSS features yet */ 4817 if (pp->neta_armada3700) 4818 return -EOPNOTSUPP; 4819 4820 /* We require at least one supported parameter to be changed 4821 * and no change in any of the unsupported parameters 4822 */ 4823 if (key || 4824 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 4825 return -EOPNOTSUPP; 4826 4827 if (!indir) 4828 return 0; 4829 4830 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); 4831 4832 return mvneta_config_rss(pp); 4833 } 4834 4835 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 4836 u8 *hfunc) 4837 { 4838 struct mvneta_port *pp = netdev_priv(dev); 4839 4840 /* Current code for Armada 3700 doesn't support RSS features yet */ 4841 if (pp->neta_armada3700) 4842 return -EOPNOTSUPP; 4843 4844 if (hfunc) 4845 *hfunc = ETH_RSS_HASH_TOP; 4846 4847 if (!indir) 4848 return 0; 4849 4850 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); 4851 4852 return 0; 4853 } 4854 4855 static void mvneta_ethtool_get_wol(struct net_device *dev, 4856 struct ethtool_wolinfo *wol) 4857 { 4858 struct mvneta_port *pp = netdev_priv(dev); 4859 4860 phylink_ethtool_get_wol(pp->phylink, wol); 4861 } 4862 4863 static int mvneta_ethtool_set_wol(struct net_device *dev, 4864 struct ethtool_wolinfo *wol) 4865 { 4866 struct mvneta_port *pp = netdev_priv(dev); 4867 int ret; 4868 4869 ret = phylink_ethtool_set_wol(pp->phylink, wol); 4870 if (!ret) 4871 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); 4872 4873 return ret; 4874 } 4875 4876 static int mvneta_ethtool_get_eee(struct net_device *dev, 4877 struct ethtool_eee *eee) 4878 { 4879 struct mvneta_port *pp = netdev_priv(dev); 4880 u32 lpi_ctl0; 4881 4882 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4883 4884 eee->eee_enabled = pp->eee_enabled; 4885 
eee->eee_active = pp->eee_active; 4886 eee->tx_lpi_enabled = pp->tx_lpi_enabled; 4887 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; 4888 4889 return phylink_ethtool_get_eee(pp->phylink, eee); 4890 } 4891 4892 static int mvneta_ethtool_set_eee(struct net_device *dev, 4893 struct ethtool_eee *eee) 4894 { 4895 struct mvneta_port *pp = netdev_priv(dev); 4896 u32 lpi_ctl0; 4897 4898 /* The Armada 37x documents do not give limits for this other than 4899 * it being an 8-bit register. 4900 */ 4901 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) 4902 return -EINVAL; 4903 4904 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4905 lpi_ctl0 &= ~(0xff << 8); 4906 lpi_ctl0 |= eee->tx_lpi_timer << 8; 4907 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); 4908 4909 pp->eee_enabled = eee->eee_enabled; 4910 pp->tx_lpi_enabled = eee->tx_lpi_enabled; 4911 4912 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); 4913 4914 return phylink_ethtool_set_eee(pp->phylink, eee); 4915 } 4916 4917 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) 4918 { 4919 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); 4920 } 4921 4922 static void mvneta_setup_rx_prio_map(struct mvneta_port *pp) 4923 { 4924 u32 val = 0; 4925 int i; 4926 4927 for (i = 0; i < rxq_number; i++) 4928 val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]); 4929 4930 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); 4931 } 4932 4933 static int mvneta_setup_mqprio(struct net_device *dev, 4934 struct tc_mqprio_qopt *qopt) 4935 { 4936 struct mvneta_port *pp = netdev_priv(dev); 4937 u8 num_tc; 4938 int i; 4939 4940 qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 4941 num_tc = qopt->num_tc; 4942 4943 if (num_tc > rxq_number) 4944 return -EINVAL; 4945 4946 if (!num_tc) { 4947 mvneta_clear_rx_prio_map(pp); 4948 netdev_reset_tc(dev); 4949 return 0; 4950 } 4951 4952 memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map)); 4953 4954 mvneta_setup_rx_prio_map(pp); 4955 4956 netdev_set_num_tc(dev, qopt->num_tc); 4957 for (i = 0; i < qopt->num_tc; i++) 4958 netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]); 4959 4960 return 0; 4961 } 4962 4963 static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, 4964 void *type_data) 4965 { 4966 switch (type) { 4967 case TC_SETUP_QDISC_MQPRIO: 4968 return mvneta_setup_mqprio(dev, type_data); 4969 default: 4970 return -EOPNOTSUPP; 4971 } 4972 } 4973 4974 static const struct net_device_ops mvneta_netdev_ops = { 4975 .ndo_open = mvneta_open, 4976 .ndo_stop = mvneta_stop, 4977 .ndo_start_xmit = mvneta_tx, 4978 .ndo_set_rx_mode = mvneta_set_rx_mode, 4979 .ndo_set_mac_address = mvneta_set_mac_addr, 4980 .ndo_change_mtu = mvneta_change_mtu, 4981 .ndo_fix_features = mvneta_fix_features, 4982 .ndo_get_stats64 = mvneta_get_stats64, 4983 .ndo_eth_ioctl = mvneta_ioctl, 4984 .ndo_bpf = mvneta_xdp, 4985 .ndo_xdp_xmit = mvneta_xdp_xmit, 4986 .ndo_setup_tc = mvneta_setup_tc, 4987 }; 4988 4989 static const struct ethtool_ops mvneta_eth_tool_ops = { 4990 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 4991 ETHTOOL_COALESCE_MAX_FRAMES, 4992 .nway_reset = mvneta_ethtool_nway_reset, 4993 .get_link = ethtool_op_get_link, 4994 .set_coalesce = mvneta_ethtool_set_coalesce, 4995 .get_coalesce = mvneta_ethtool_get_coalesce, 4996 .get_drvinfo = mvneta_ethtool_get_drvinfo, 4997 .get_ringparam = mvneta_ethtool_get_ringparam, 4998 .set_ringparam = mvneta_ethtool_set_ringparam, 4999 .get_pauseparam = mvneta_ethtool_get_pauseparam, 5000 .set_pauseparam = mvneta_ethtool_set_pauseparam, 5001 .get_strings = 
mvneta_ethtool_get_strings, 5002 .get_ethtool_stats = mvneta_ethtool_get_stats, 5003 .get_sset_count = mvneta_ethtool_get_sset_count, 5004 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, 5005 .get_rxnfc = mvneta_ethtool_get_rxnfc, 5006 .get_rxfh = mvneta_ethtool_get_rxfh, 5007 .set_rxfh = mvneta_ethtool_set_rxfh, 5008 .get_link_ksettings = mvneta_ethtool_get_link_ksettings, 5009 .set_link_ksettings = mvneta_ethtool_set_link_ksettings, 5010 .get_wol = mvneta_ethtool_get_wol, 5011 .set_wol = mvneta_ethtool_set_wol, 5012 .get_eee = mvneta_ethtool_get_eee, 5013 .set_eee = mvneta_ethtool_set_eee, 5014 }; 5015 5016 /* Initialize hw */ 5017 static int mvneta_init(struct device *dev, struct mvneta_port *pp) 5018 { 5019 int queue; 5020 5021 /* Disable port */ 5022 mvneta_port_disable(pp); 5023 5024 /* Set port default values */ 5025 mvneta_defaults_set(pp); 5026 5027 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); 5028 if (!pp->txqs) 5029 return -ENOMEM; 5030 5031 /* Initialize TX descriptor rings */ 5032 for (queue = 0; queue < txq_number; queue++) { 5033 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5034 txq->id = queue; 5035 txq->size = pp->tx_ring_size; 5036 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 5037 } 5038 5039 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); 5040 if (!pp->rxqs) 5041 return -ENOMEM; 5042 5043 /* Create Rx descriptor rings */ 5044 for (queue = 0; queue < rxq_number; queue++) { 5045 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5046 rxq->id = queue; 5047 rxq->size = pp->rx_ring_size; 5048 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 5049 rxq->time_coal = MVNETA_RX_COAL_USEC; 5050 rxq->buf_virt_addr 5051 = devm_kmalloc_array(pp->dev->dev.parent, 5052 rxq->size, 5053 sizeof(*rxq->buf_virt_addr), 5054 GFP_KERNEL); 5055 if (!rxq->buf_virt_addr) 5056 return -ENOMEM; 5057 } 5058 5059 return 0; 5060 } 5061 5062 /* platform glue : initialize decoding windows */ 5063 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 5064 const struct mbus_dram_target_info *dram) 5065 { 5066 u32 win_enable; 5067 u32 win_protect; 5068 int i; 5069 5070 for (i = 0; i < 6; i++) { 5071 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 5072 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 5073 5074 if (i < 4) 5075 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 5076 } 5077 5078 win_enable = 0x3f; 5079 win_protect = 0; 5080 5081 if (dram) { 5082 for (i = 0; i < dram->num_cs; i++) { 5083 const struct mbus_dram_window *cs = dram->cs + i; 5084 5085 mvreg_write(pp, MVNETA_WIN_BASE(i), 5086 (cs->base & 0xffff0000) | 5087 (cs->mbus_attr << 8) | 5088 dram->mbus_dram_target_id); 5089 5090 mvreg_write(pp, MVNETA_WIN_SIZE(i), 5091 (cs->size - 1) & 0xffff0000); 5092 5093 win_enable &= ~(1 << i); 5094 win_protect |= 3 << (2 * i); 5095 } 5096 } else { 5097 /* For Armada3700 open default 4GB Mbus window, leaving 5098 * arbitration of target/attribute to a different layer 5099 * of configuration. 
5100 */ 5101 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); 5102 win_enable &= ~BIT(0); 5103 win_protect = 3; 5104 } 5105 5106 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 5107 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 5108 } 5109 5110 /* Power up the port */ 5111 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 5112 { 5113 /* MAC Cause register should be cleared */ 5114 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 5115 5116 if (phy_mode != PHY_INTERFACE_MODE_QSGMII && 5117 phy_mode != PHY_INTERFACE_MODE_SGMII && 5118 !phy_interface_mode_is_8023z(phy_mode) && 5119 !phy_interface_mode_is_rgmii(phy_mode)) 5120 return -EINVAL; 5121 5122 return 0; 5123 } 5124 5125 /* Device initialization routine */ 5126 static int mvneta_probe(struct platform_device *pdev) 5127 { 5128 struct device_node *dn = pdev->dev.of_node; 5129 struct device_node *bm_node; 5130 struct mvneta_port *pp; 5131 struct net_device *dev; 5132 struct phylink *phylink; 5133 struct phy *comphy; 5134 char hw_mac_addr[ETH_ALEN]; 5135 phy_interface_t phy_mode; 5136 const char *mac_from; 5137 int tx_csum_limit; 5138 int err; 5139 int cpu; 5140 5141 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), 5142 txq_number, rxq_number); 5143 if (!dev) 5144 return -ENOMEM; 5145 5146 dev->irq = irq_of_parse_and_map(dn, 0); 5147 if (dev->irq == 0) 5148 return -EINVAL; 5149 5150 err = of_get_phy_mode(dn, &phy_mode); 5151 if (err) { 5152 dev_err(&pdev->dev, "incorrect phy-mode\n"); 5153 goto err_free_irq; 5154 } 5155 5156 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); 5157 if (comphy == ERR_PTR(-EPROBE_DEFER)) { 5158 err = -EPROBE_DEFER; 5159 goto err_free_irq; 5160 } else if (IS_ERR(comphy)) { 5161 comphy = NULL; 5162 } 5163 5164 pp = netdev_priv(dev); 5165 spin_lock_init(&pp->lock); 5166 5167 pp->phylink_config.dev = &dev->dev; 5168 pp->phylink_config.type = PHYLINK_NETDEV; 5169 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); 5170 __set_bit(PHY_INTERFACE_MODE_QSGMII, 5171 pp->phylink_config.supported_interfaces); 5172 if (comphy) { 5173 /* If a COMPHY is present, we can support any of the serdes 5174 * modes and switch between them. 
5175 */ 5176 __set_bit(PHY_INTERFACE_MODE_SGMII, 5177 pp->phylink_config.supported_interfaces); 5178 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5179 pp->phylink_config.supported_interfaces); 5180 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5181 pp->phylink_config.supported_interfaces); 5182 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { 5183 /* No COMPHY, with only 2500BASE-X mode supported */ 5184 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 5185 pp->phylink_config.supported_interfaces); 5186 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || 5187 phy_mode == PHY_INTERFACE_MODE_SGMII) { 5188 /* No COMPHY, we can switch between 1000BASE-X and SGMII */ 5189 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 5190 pp->phylink_config.supported_interfaces); 5191 __set_bit(PHY_INTERFACE_MODE_SGMII, 5192 pp->phylink_config.supported_interfaces); 5193 } 5194 5195 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, 5196 phy_mode, &mvneta_phylink_ops); 5197 if (IS_ERR(phylink)) { 5198 err = PTR_ERR(phylink); 5199 goto err_free_irq; 5200 } 5201 5202 dev->tx_queue_len = MVNETA_MAX_TXD; 5203 dev->watchdog_timeo = 5 * HZ; 5204 dev->netdev_ops = &mvneta_netdev_ops; 5205 5206 dev->ethtool_ops = &mvneta_eth_tool_ops; 5207 5208 pp->phylink = phylink; 5209 pp->comphy = comphy; 5210 pp->phy_interface = phy_mode; 5211 pp->dn = dn; 5212 5213 pp->rxq_def = rxq_def; 5214 pp->indir[0] = rxq_def; 5215 5216 /* Get special SoC configurations */ 5217 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) 5218 pp->neta_armada3700 = true; 5219 5220 pp->clk = devm_clk_get(&pdev->dev, "core"); 5221 if (IS_ERR(pp->clk)) 5222 pp->clk = devm_clk_get(&pdev->dev, NULL); 5223 if (IS_ERR(pp->clk)) { 5224 err = PTR_ERR(pp->clk); 5225 goto err_free_phylink; 5226 } 5227 5228 clk_prepare_enable(pp->clk); 5229 5230 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); 5231 if (!IS_ERR(pp->clk_bus)) 5232 clk_prepare_enable(pp->clk_bus); 5233 5234 pp->base = devm_platform_ioremap_resource(pdev, 0); 5235 if (IS_ERR(pp->base)) { 5236 err = PTR_ERR(pp->base); 5237 goto err_clk; 5238 } 5239 5240 /* Alloc per-cpu port structure */ 5241 pp->ports = alloc_percpu(struct mvneta_pcpu_port); 5242 if (!pp->ports) { 5243 err = -ENOMEM; 5244 goto err_clk; 5245 } 5246 5247 /* Alloc per-cpu stats */ 5248 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); 5249 if (!pp->stats) { 5250 err = -ENOMEM; 5251 goto err_free_ports; 5252 } 5253 5254 err = of_get_ethdev_address(dn, dev); 5255 if (!err) { 5256 mac_from = "device tree"; 5257 } else { 5258 mvneta_get_mac_addr(pp, hw_mac_addr); 5259 if (is_valid_ether_addr(hw_mac_addr)) { 5260 mac_from = "hardware"; 5261 eth_hw_addr_set(dev, hw_mac_addr); 5262 } else { 5263 mac_from = "random"; 5264 eth_hw_addr_random(dev); 5265 } 5266 } 5267 5268 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { 5269 if (tx_csum_limit < 0 || 5270 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { 5271 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5272 dev_info(&pdev->dev, 5273 "Wrong TX csum limit in DT, set to %dB\n", 5274 MVNETA_TX_CSUM_DEF_SIZE); 5275 } 5276 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { 5277 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5278 } else { 5279 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; 5280 } 5281 5282 pp->tx_csum_limit = tx_csum_limit; 5283 5284 pp->dram_target_info = mv_mbus_dram_info(); 5285 /* Armada3700 requires setting default configuration of Mbus 5286 * windows, however without using filled mbus_dram_target_info 5287 * structure. 
5288 */ 5289 if (pp->dram_target_info || pp->neta_armada3700) 5290 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5291 5292 pp->tx_ring_size = MVNETA_MAX_TXD; 5293 pp->rx_ring_size = MVNETA_MAX_RXD; 5294 5295 pp->dev = dev; 5296 SET_NETDEV_DEV(dev, &pdev->dev); 5297 5298 pp->id = global_port_id++; 5299 5300 /* Obtain access to BM resources if enabled and already initialized */ 5301 bm_node = of_parse_phandle(dn, "buffer-manager", 0); 5302 if (bm_node) { 5303 pp->bm_priv = mvneta_bm_get(bm_node); 5304 if (pp->bm_priv) { 5305 err = mvneta_bm_port_init(pdev, pp); 5306 if (err < 0) { 5307 dev_info(&pdev->dev, 5308 "use SW buffer management\n"); 5309 mvneta_bm_put(pp->bm_priv); 5310 pp->bm_priv = NULL; 5311 } 5312 } 5313 /* Set RX packet offset correction for platforms, whose 5314 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit 5315 * platforms and 0B for 32-bit ones. 5316 */ 5317 pp->rx_offset_correction = max(0, 5318 NET_SKB_PAD - 5319 MVNETA_RX_PKT_OFFSET_CORRECTION); 5320 } 5321 of_node_put(bm_node); 5322 5323 /* sw buffer management */ 5324 if (!pp->bm_priv) 5325 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5326 5327 err = mvneta_init(&pdev->dev, pp); 5328 if (err < 0) 5329 goto err_netdev; 5330 5331 err = mvneta_port_power_up(pp, pp->phy_interface); 5332 if (err < 0) { 5333 dev_err(&pdev->dev, "can't power up port\n"); 5334 goto err_netdev; 5335 } 5336 5337 /* Armada3700 network controller does not support per-cpu 5338 * operation, so only single NAPI should be initialized. 5339 */ 5340 if (pp->neta_armada3700) { 5341 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); 5342 } else { 5343 for_each_present_cpu(cpu) { 5344 struct mvneta_pcpu_port *port = 5345 per_cpu_ptr(pp->ports, cpu); 5346 5347 netif_napi_add(dev, &port->napi, mvneta_poll, 5348 NAPI_POLL_WEIGHT); 5349 port->pp = pp; 5350 } 5351 } 5352 5353 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5354 NETIF_F_TSO | NETIF_F_RXCSUM; 5355 dev->hw_features |= dev->features; 5356 dev->vlan_features |= dev->features; 5357 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 5358 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 5359 5360 /* MTU range: 68 - 9676 */ 5361 dev->min_mtu = ETH_MIN_MTU; 5362 /* 9676 == 9700 - 20 and rounding to 8 */ 5363 dev->max_mtu = 9676; 5364 5365 err = register_netdev(dev); 5366 if (err < 0) { 5367 dev_err(&pdev->dev, "failed to register\n"); 5368 goto err_netdev; 5369 } 5370 5371 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 5372 dev->dev_addr); 5373 5374 platform_set_drvdata(pdev, pp->dev); 5375 5376 return 0; 5377 5378 err_netdev: 5379 if (pp->bm_priv) { 5380 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5381 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5382 1 << pp->id); 5383 mvneta_bm_put(pp->bm_priv); 5384 } 5385 free_percpu(pp->stats); 5386 err_free_ports: 5387 free_percpu(pp->ports); 5388 err_clk: 5389 clk_disable_unprepare(pp->clk_bus); 5390 clk_disable_unprepare(pp->clk); 5391 err_free_phylink: 5392 if (pp->phylink) 5393 phylink_destroy(pp->phylink); 5394 err_free_irq: 5395 irq_dispose_mapping(dev->irq); 5396 return err; 5397 } 5398 5399 /* Device removal routine */ 5400 static int mvneta_remove(struct platform_device *pdev) 5401 { 5402 struct net_device *dev = platform_get_drvdata(pdev); 5403 struct mvneta_port *pp = netdev_priv(dev); 5404 5405 unregister_netdev(dev); 5406 clk_disable_unprepare(pp->clk_bus); 5407 clk_disable_unprepare(pp->clk); 5408 free_percpu(pp->ports); 5409 free_percpu(pp->stats); 5410 
irq_dispose_mapping(dev->irq); 5411 phylink_destroy(pp->phylink); 5412 5413 if (pp->bm_priv) { 5414 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5415 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5416 1 << pp->id); 5417 mvneta_bm_put(pp->bm_priv); 5418 } 5419 5420 return 0; 5421 } 5422 5423 #ifdef CONFIG_PM_SLEEP 5424 static int mvneta_suspend(struct device *device) 5425 { 5426 int queue; 5427 struct net_device *dev = dev_get_drvdata(device); 5428 struct mvneta_port *pp = netdev_priv(dev); 5429 5430 if (!netif_running(dev)) 5431 goto clean_exit; 5432 5433 if (!pp->neta_armada3700) { 5434 spin_lock(&pp->lock); 5435 pp->is_stopped = true; 5436 spin_unlock(&pp->lock); 5437 5438 cpuhp_state_remove_instance_nocalls(online_hpstate, 5439 &pp->node_online); 5440 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5441 &pp->node_dead); 5442 } 5443 5444 rtnl_lock(); 5445 mvneta_stop_dev(pp); 5446 rtnl_unlock(); 5447 5448 for (queue = 0; queue < rxq_number; queue++) { 5449 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5450 5451 mvneta_rxq_drop_pkts(pp, rxq); 5452 } 5453 5454 for (queue = 0; queue < txq_number; queue++) { 5455 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5456 5457 mvneta_txq_hw_deinit(pp, txq); 5458 } 5459 5460 clean_exit: 5461 netif_device_detach(dev); 5462 clk_disable_unprepare(pp->clk_bus); 5463 clk_disable_unprepare(pp->clk); 5464 5465 return 0; 5466 } 5467 5468 static int mvneta_resume(struct device *device) 5469 { 5470 struct platform_device *pdev = to_platform_device(device); 5471 struct net_device *dev = dev_get_drvdata(device); 5472 struct mvneta_port *pp = netdev_priv(dev); 5473 int err, queue; 5474 5475 clk_prepare_enable(pp->clk); 5476 if (!IS_ERR(pp->clk_bus)) 5477 clk_prepare_enable(pp->clk_bus); 5478 if (pp->dram_target_info || pp->neta_armada3700) 5479 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5480 if (pp->bm_priv) { 5481 err = mvneta_bm_port_init(pdev, pp); 5482 if (err < 0) { 5483 dev_info(&pdev->dev, "use SW buffer management\n"); 5484 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5485 pp->bm_priv = NULL; 5486 } 5487 } 5488 mvneta_defaults_set(pp); 5489 err = mvneta_port_power_up(pp, pp->phy_interface); 5490 if (err < 0) { 5491 dev_err(device, "can't power up port\n"); 5492 return err; 5493 } 5494 5495 netif_device_attach(dev); 5496 5497 if (!netif_running(dev)) 5498 return 0; 5499 5500 for (queue = 0; queue < rxq_number; queue++) { 5501 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5502 5503 rxq->next_desc_to_proc = 0; 5504 mvneta_rxq_hw_init(pp, rxq); 5505 } 5506 5507 for (queue = 0; queue < txq_number; queue++) { 5508 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5509 5510 txq->next_desc_to_proc = 0; 5511 mvneta_txq_hw_init(pp, txq); 5512 } 5513 5514 if (!pp->neta_armada3700) { 5515 spin_lock(&pp->lock); 5516 pp->is_stopped = false; 5517 spin_unlock(&pp->lock); 5518 cpuhp_state_add_instance_nocalls(online_hpstate, 5519 &pp->node_online); 5520 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5521 &pp->node_dead); 5522 } 5523 5524 rtnl_lock(); 5525 mvneta_start_dev(pp); 5526 rtnl_unlock(); 5527 mvneta_set_rx_mode(dev); 5528 5529 return 0; 5530 } 5531 #endif 5532 5533 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); 5534 5535 static const struct of_device_id mvneta_match[] = { 5536 { .compatible = "marvell,armada-370-neta" }, 5537 { .compatible = "marvell,armada-xp-neta" }, 5538 { .compatible = "marvell,armada-3700-neta" }, 5539 { } 5540 }; 5541 MODULE_DEVICE_TABLE(of, mvneta_match); 
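/* Illustrative device tree node for this binding (a sketch, not copied from
 * any real .dts): the driver matches on the compatible strings above, and
 * probe reads "reg", "interrupts", the "core"/"bus" clocks, "phy-mode", an
 * optional PHY handle and the optional "tx-csum-limit" and "buffer-manager"
 * properties. The addresses, interrupt and clock specifiers below are
 * placeholder assumptions and differ per board:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x4000>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy-mode = "rgmii-id";
 *		phy = <&phy0>;
 *	};
 */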
5542 5543 static struct platform_driver mvneta_driver = { 5544 .probe = mvneta_probe, 5545 .remove = mvneta_remove, 5546 .driver = { 5547 .name = MVNETA_DRIVER_NAME, 5548 .of_match_table = mvneta_match, 5549 .pm = &mvneta_pm_ops, 5550 }, 5551 }; 5552 5553 static int __init mvneta_driver_init(void) 5554 { 5555 int ret; 5556 5557 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online", 5558 mvneta_cpu_online, 5559 mvneta_cpu_down_prepare); 5560 if (ret < 0) 5561 goto out; 5562 online_hpstate = ret; 5563 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", 5564 NULL, mvneta_cpu_dead); 5565 if (ret) 5566 goto err_dead; 5567 5568 ret = platform_driver_register(&mvneta_driver); 5569 if (ret) 5570 goto err; 5571 return 0; 5572 5573 err: 5574 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5575 err_dead: 5576 cpuhp_remove_multi_state(online_hpstate); 5577 out: 5578 return ret; 5579 } 5580 module_init(mvneta_driver_init); 5581 5582 static void __exit mvneta_driver_exit(void) 5583 { 5584 platform_driver_unregister(&mvneta_driver); 5585 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5586 cpuhp_remove_multi_state(online_hpstate); 5587 } 5588 module_exit(mvneta_driver_exit); 5589 5590 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 5591 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 5592 MODULE_LICENSE("GPL"); 5593 5594 module_param(rxq_number, int, 0444); 5595 module_param(txq_number, int, 0444); 5596 5597 module_param(rxq_def, int, 0444); 5598 module_param(rx_copybreak, int, 0644); 5599
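/* Usage sketch for the module parameters above (standard module_param
 * semantics, values are only examples): rxq_number, txq_number and rxq_def
 * are 0444, i.e. read-only at runtime, so they can only be set when the
 * driver is loaded ("modprobe mvneta rxq_def=1" when built as a module, or
 * "mvneta.rxq_def=1" on the kernel command line when built in), while
 * rx_copybreak is 0644 and can also be changed later through
 * /sys/module/mvneta/parameters/rx_copybreak.
 */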