/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE	0x2294
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)	 | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_HSGMII_SERDES_PROTO	0x1107
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_BM_ADDRESS		0x2504
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is not
 * set, then a read of the register from this CPU will always return
 * 0 and a write won't do anything
 */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X	BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AN_COMPLETE		BIT(11)
#define MVNETA_GMAC_SYNC_OK		BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE	BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN	BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL	BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL	BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_GMAC_CTRL_4		0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE	BIT(1)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_DEC_SENT_MASK	0xff
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_LPI_CTRL_0		0x2cc0
#define MVNETA_LPI_CTRL_1		0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE	BIT(0)
#define MVNETA_LPI_CTRL_2		0x2cc8
#define MVNETA_LPI_STATUS		0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is automatically filled with zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
 * offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64
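
/* Note: this correction is applied by simply adding it to the buffer DMA
 * address programmed into each RX descriptor when it is (re)filled, see
 * mvneta_rx_refill() below.
 */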

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      cache_line_size())

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
			 MVNETA_SKB_HEADROOM))
#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1

#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
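
/* Note: in the table above, T_REG_32/T_REG_64 entries carry the offset of a
 * hardware counter (most of them in the MIB block starting at
 * MVNETA_MIB_COUNTERS_BASE), whereas the T_SW entries identify counters that
 * the driver maintains in software.
 */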

struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
	u64	xdp_xmit;
	u64	xdp_xmit_err;
	u64	xdp_tx;
	u64	xdp_tx_err;
};

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64	skb_alloc_error;
	u64	refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	u64	rx_dropped;
	u64	rx_errors;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

enum {
	__MVNETA_DOWN,
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	unsigned long state;

	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	struct bpf_prog *xdp_prog;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct phy *comphy;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues and all of them are allocated;
 * by default, received traffic is steered to the default queue (rxq_def).
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes   = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors  = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes   = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors  += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->tx_dropped = dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
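/* Note: the value written to the MAX_RX_SIZE field below is
 * (max_rx_size - MVNETA_MH_SIZE) / 2, i.e. the limit is programmed in
 * 2-byte units with the Marvell header excluded.
 */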
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is programmed in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure an MBUS window to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure, the
 * buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */
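
/* Note: each 32-bit word of the DA filter tables packs four one-byte
 * entries; within an entry, bit 0 is the "accept frame" bit and bits 3:1
 * select the RX queue, which is why the per-entry value below is replicated
 * across all four byte lanes before being written.
 */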

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated with the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
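/* Note: the value written below is (core clock rate in MHz) * usecs, i.e.
 * the coalescing delay is programmed as a number of core clock cycles.
 */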
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	u32 status = rx_desc->status;

	/* update per-cpu counter */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
1822 */ 1823 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1824 u32 cause) 1825 { 1826 int queue = fls(cause) - 1; 1827 1828 return &pp->txqs[queue]; 1829 } 1830 1831 /* Free tx queue skbuffs */ 1832 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1833 struct mvneta_tx_queue *txq, int num, 1834 struct netdev_queue *nq, bool napi) 1835 { 1836 unsigned int bytes_compl = 0, pkts_compl = 0; 1837 struct xdp_frame_bulk bq; 1838 int i; 1839 1840 xdp_frame_bulk_init(&bq); 1841 1842 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 1843 1844 for (i = 0; i < num; i++) { 1845 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; 1846 struct mvneta_tx_desc *tx_desc = txq->descs + 1847 txq->txq_get_index; 1848 1849 mvneta_txq_inc_get(txq); 1850 1851 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && 1852 buf->type != MVNETA_TYPE_XDP_TX) 1853 dma_unmap_single(pp->dev->dev.parent, 1854 tx_desc->buf_phys_addr, 1855 tx_desc->data_size, DMA_TO_DEVICE); 1856 if (buf->type == MVNETA_TYPE_SKB && buf->skb) { 1857 bytes_compl += buf->skb->len; 1858 pkts_compl++; 1859 dev_kfree_skb_any(buf->skb); 1860 } else if (buf->type == MVNETA_TYPE_XDP_TX || 1861 buf->type == MVNETA_TYPE_XDP_NDO) { 1862 if (napi && buf->type == MVNETA_TYPE_XDP_TX) 1863 xdp_return_frame_rx_napi(buf->xdpf); 1864 else 1865 xdp_return_frame_bulk(buf->xdpf, &bq); 1866 } 1867 } 1868 xdp_flush_frame_bulk(&bq); 1869 1870 rcu_read_unlock(); 1871 1872 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); 1873 } 1874 1875 /* Handle end of transmission */ 1876 static void mvneta_txq_done(struct mvneta_port *pp, 1877 struct mvneta_tx_queue *txq) 1878 { 1879 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1880 int tx_done; 1881 1882 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1883 if (!tx_done) 1884 return; 1885 1886 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); 1887 1888 txq->count -= tx_done; 1889 1890 if (netif_tx_queue_stopped(nq)) { 1891 if (txq->count <= txq->tx_wake_threshold) 1892 netif_tx_wake_queue(nq); 1893 } 1894 } 1895 1896 /* Refill processing for SW buffer management */ 1897 /* Allocate page per descriptor */ 1898 static int mvneta_rx_refill(struct mvneta_port *pp, 1899 struct mvneta_rx_desc *rx_desc, 1900 struct mvneta_rx_queue *rxq, 1901 gfp_t gfp_mask) 1902 { 1903 dma_addr_t phys_addr; 1904 struct page *page; 1905 1906 page = page_pool_alloc_pages(rxq->page_pool, 1907 gfp_mask | __GFP_NOWARN); 1908 if (!page) 1909 return -ENOMEM; 1910 1911 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; 1912 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); 1913 1914 return 0; 1915 } 1916 1917 /* Handle tx checksum */ 1918 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) 1919 { 1920 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1921 int ip_hdr_len = 0; 1922 __be16 l3_proto = vlan_get_protocol(skb); 1923 u8 l4_proto; 1924 1925 if (l3_proto == htons(ETH_P_IP)) { 1926 struct iphdr *ip4h = ip_hdr(skb); 1927 1928 /* Calculate IPv4 checksum and L4 checksum */ 1929 ip_hdr_len = ip4h->ihl; 1930 l4_proto = ip4h->protocol; 1931 } else if (l3_proto == htons(ETH_P_IPV6)) { 1932 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1933 1934 /* Read l4_protocol from one of IPv6 extra headers */ 1935 if (skb_network_header_len(skb) > 0) 1936 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1937 l4_proto = ip6h->nexthdr; 1938 } else 1939 return MVNETA_TX_L4_CSUM_NOT; 1940 1941 return mvneta_txq_desc_csum(skb_network_offset(skb), 1942 l3_proto, ip_hdr_len, l4_proto); 
1943 } 1944 1945 return MVNETA_TX_L4_CSUM_NOT; 1946 } 1947 1948 /* Drop packets received by the RXQ and free buffers */ 1949 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1950 struct mvneta_rx_queue *rxq) 1951 { 1952 int rx_done, i; 1953 1954 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1955 if (rx_done) 1956 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1957 1958 if (pp->bm_priv) { 1959 for (i = 0; i < rx_done; i++) { 1960 struct mvneta_rx_desc *rx_desc = 1961 mvneta_rxq_next_desc_get(rxq); 1962 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 1963 struct mvneta_bm_pool *bm_pool; 1964 1965 bm_pool = &pp->bm_priv->bm_pools[pool_id]; 1966 /* Return dropped buffer to the pool */ 1967 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 1968 rx_desc->buf_phys_addr); 1969 } 1970 return; 1971 } 1972 1973 for (i = 0; i < rxq->size; i++) { 1974 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 1975 void *data = rxq->buf_virt_addr[i]; 1976 if (!data || !(rx_desc->buf_phys_addr)) 1977 continue; 1978 1979 page_pool_put_full_page(rxq->page_pool, data, false); 1980 } 1981 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 1982 xdp_rxq_info_unreg(&rxq->xdp_rxq); 1983 page_pool_destroy(rxq->page_pool); 1984 rxq->page_pool = NULL; 1985 } 1986 1987 static void 1988 mvneta_update_stats(struct mvneta_port *pp, 1989 struct mvneta_stats *ps) 1990 { 1991 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1992 1993 u64_stats_update_begin(&stats->syncp); 1994 stats->es.ps.rx_packets += ps->rx_packets; 1995 stats->es.ps.rx_bytes += ps->rx_bytes; 1996 /* xdp */ 1997 stats->es.ps.xdp_redirect += ps->xdp_redirect; 1998 stats->es.ps.xdp_pass += ps->xdp_pass; 1999 stats->es.ps.xdp_drop += ps->xdp_drop; 2000 u64_stats_update_end(&stats->syncp); 2001 } 2002 2003 static inline 2004 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) 2005 { 2006 struct mvneta_rx_desc *rx_desc; 2007 int curr_desc = rxq->first_to_refill; 2008 int i; 2009 2010 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { 2011 rx_desc = rxq->descs + curr_desc; 2012 if (!(rx_desc->buf_phys_addr)) { 2013 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { 2014 struct mvneta_pcpu_stats *stats; 2015 2016 pr_err("Can't refill queue %d. 
Done %d from %d\n", 2017 rxq->id, i, rxq->refill_num); 2018 2019 stats = this_cpu_ptr(pp->stats); 2020 u64_stats_update_begin(&stats->syncp); 2021 stats->es.refill_error++; 2022 u64_stats_update_end(&stats->syncp); 2023 break; 2024 } 2025 } 2026 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); 2027 } 2028 rxq->refill_num -= i; 2029 rxq->first_to_refill = curr_desc; 2030 2031 return i; 2032 } 2033 2034 static void 2035 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2036 struct xdp_buff *xdp, struct skb_shared_info *sinfo, 2037 int sync_len) 2038 { 2039 int i; 2040 2041 for (i = 0; i < sinfo->nr_frags; i++) 2042 page_pool_put_full_page(rxq->page_pool, 2043 skb_frag_page(&sinfo->frags[i]), true); 2044 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), 2045 sync_len, true); 2046 } 2047 2048 static int 2049 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, 2050 struct xdp_frame *xdpf, bool dma_map) 2051 { 2052 struct mvneta_tx_desc *tx_desc; 2053 struct mvneta_tx_buf *buf; 2054 dma_addr_t dma_addr; 2055 2056 if (txq->count >= txq->tx_stop_threshold) 2057 return MVNETA_XDP_DROPPED; 2058 2059 tx_desc = mvneta_txq_next_desc_get(txq); 2060 2061 buf = &txq->buf[txq->txq_put_index]; 2062 if (dma_map) { 2063 /* ndo_xdp_xmit */ 2064 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, 2065 xdpf->len, DMA_TO_DEVICE); 2066 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { 2067 mvneta_txq_desc_put(txq); 2068 return MVNETA_XDP_DROPPED; 2069 } 2070 buf->type = MVNETA_TYPE_XDP_NDO; 2071 } else { 2072 struct page *page = virt_to_page(xdpf->data); 2073 2074 dma_addr = page_pool_get_dma_addr(page) + 2075 sizeof(*xdpf) + xdpf->headroom; 2076 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, 2077 xdpf->len, DMA_BIDIRECTIONAL); 2078 buf->type = MVNETA_TYPE_XDP_TX; 2079 } 2080 buf->xdpf = xdpf; 2081 2082 tx_desc->command = MVNETA_TXD_FLZ_DESC; 2083 tx_desc->buf_phys_addr = dma_addr; 2084 tx_desc->data_size = xdpf->len; 2085 2086 mvneta_txq_inc_put(txq); 2087 txq->pending++; 2088 txq->count++; 2089 2090 return MVNETA_XDP_TX; 2091 } 2092 2093 static int 2094 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) 2095 { 2096 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2097 struct mvneta_tx_queue *txq; 2098 struct netdev_queue *nq; 2099 struct xdp_frame *xdpf; 2100 int cpu; 2101 u32 ret; 2102 2103 xdpf = xdp_convert_buff_to_frame(xdp); 2104 if (unlikely(!xdpf)) 2105 return MVNETA_XDP_DROPPED; 2106 2107 cpu = smp_processor_id(); 2108 txq = &pp->txqs[cpu % txq_number]; 2109 nq = netdev_get_tx_queue(pp->dev, txq->id); 2110 2111 __netif_tx_lock(nq, cpu); 2112 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); 2113 if (ret == MVNETA_XDP_TX) { 2114 u64_stats_update_begin(&stats->syncp); 2115 stats->es.ps.tx_bytes += xdpf->len; 2116 stats->es.ps.tx_packets++; 2117 stats->es.ps.xdp_tx++; 2118 u64_stats_update_end(&stats->syncp); 2119 2120 mvneta_txq_pend_desc_add(pp, txq, 0); 2121 } else { 2122 u64_stats_update_begin(&stats->syncp); 2123 stats->es.ps.xdp_tx_err++; 2124 u64_stats_update_end(&stats->syncp); 2125 } 2126 __netif_tx_unlock(nq); 2127 2128 return ret; 2129 } 2130 2131 static int 2132 mvneta_xdp_xmit(struct net_device *dev, int num_frame, 2133 struct xdp_frame **frames, u32 flags) 2134 { 2135 struct mvneta_port *pp = netdev_priv(dev); 2136 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2137 int i, nxmit_byte = 0, nxmit = num_frame; 2138 int cpu = smp_processor_id(); 2139 struct 
mvneta_tx_queue *txq; 2140 struct netdev_queue *nq; 2141 u32 ret; 2142 2143 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) 2144 return -ENETDOWN; 2145 2146 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2147 return -EINVAL; 2148 2149 txq = &pp->txqs[cpu % txq_number]; 2150 nq = netdev_get_tx_queue(pp->dev, txq->id); 2151 2152 __netif_tx_lock(nq, cpu); 2153 for (i = 0; i < num_frame; i++) { 2154 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); 2155 if (ret == MVNETA_XDP_TX) { 2156 nxmit_byte += frames[i]->len; 2157 } else { 2158 xdp_return_frame_rx_napi(frames[i]); 2159 nxmit--; 2160 } 2161 } 2162 2163 if (unlikely(flags & XDP_XMIT_FLUSH)) 2164 mvneta_txq_pend_desc_add(pp, txq, 0); 2165 __netif_tx_unlock(nq); 2166 2167 u64_stats_update_begin(&stats->syncp); 2168 stats->es.ps.tx_bytes += nxmit_byte; 2169 stats->es.ps.tx_packets += nxmit; 2170 stats->es.ps.xdp_xmit += nxmit; 2171 stats->es.ps.xdp_xmit_err += num_frame - nxmit; 2172 u64_stats_update_end(&stats->syncp); 2173 2174 return nxmit; 2175 } 2176 2177 static int 2178 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2179 struct bpf_prog *prog, struct xdp_buff *xdp, 2180 u32 frame_sz, struct mvneta_stats *stats) 2181 { 2182 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2183 unsigned int len, data_len, sync; 2184 u32 ret, act; 2185 2186 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2187 data_len = xdp->data_end - xdp->data; 2188 act = bpf_prog_run_xdp(prog, xdp); 2189 2190 /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ 2191 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; 2192 sync = max(sync, len); 2193 2194 switch (act) { 2195 case XDP_PASS: 2196 stats->xdp_pass++; 2197 return MVNETA_XDP_PASS; 2198 case XDP_REDIRECT: { 2199 int err; 2200 2201 err = xdp_do_redirect(pp->dev, xdp, prog); 2202 if (unlikely(err)) { 2203 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2204 ret = MVNETA_XDP_DROPPED; 2205 } else { 2206 ret = MVNETA_XDP_REDIR; 2207 stats->xdp_redirect++; 2208 } 2209 break; 2210 } 2211 case XDP_TX: 2212 ret = mvneta_xdp_xmit_back(pp, xdp); 2213 if (ret != MVNETA_XDP_TX) 2214 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2215 break; 2216 default: 2217 bpf_warn_invalid_xdp_action(act); 2218 fallthrough; 2219 case XDP_ABORTED: 2220 trace_xdp_exception(pp->dev, prog, act); 2221 fallthrough; 2222 case XDP_DROP: 2223 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); 2224 ret = MVNETA_XDP_DROPPED; 2225 stats->xdp_drop++; 2226 break; 2227 } 2228 2229 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; 2230 stats->rx_packets++; 2231 2232 return ret; 2233 } 2234 2235 static void 2236 mvneta_swbm_rx_frame(struct mvneta_port *pp, 2237 struct mvneta_rx_desc *rx_desc, 2238 struct mvneta_rx_queue *rxq, 2239 struct xdp_buff *xdp, int *size, 2240 struct page *page) 2241 { 2242 unsigned char *data = page_address(page); 2243 int data_len = -MVNETA_MH_SIZE, len; 2244 struct net_device *dev = pp->dev; 2245 enum dma_data_direction dma_dir; 2246 struct skb_shared_info *sinfo; 2247 2248 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2249 len = MVNETA_MAX_RX_BUF_SIZE; 2250 data_len += len; 2251 } else { 2252 len = *size; 2253 data_len += len - ETH_FCS_LEN; 2254 } 2255 *size = *size - len; 2256 2257 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2258 dma_sync_single_for_cpu(dev->dev.parent, 2259 rx_desc->buf_phys_addr, 2260 len, dma_dir); 2261 2262 rx_desc->buf_phys_addr = 0; 2263 2264 /* Prefetch header */ 2265 
prefetch(data); 2266 2267 xdp->data_hard_start = data; 2268 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; 2269 xdp->data_end = xdp->data + data_len; 2270 xdp_set_data_meta_invalid(xdp); 2271 2272 sinfo = xdp_get_shared_info_from_buff(xdp); 2273 sinfo->nr_frags = 0; 2274 } 2275 2276 static void 2277 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, 2278 struct mvneta_rx_desc *rx_desc, 2279 struct mvneta_rx_queue *rxq, 2280 struct xdp_buff *xdp, int *size, 2281 struct skb_shared_info *xdp_sinfo, 2282 struct page *page) 2283 { 2284 struct net_device *dev = pp->dev; 2285 enum dma_data_direction dma_dir; 2286 int data_len, len; 2287 2288 if (*size > MVNETA_MAX_RX_BUF_SIZE) { 2289 len = MVNETA_MAX_RX_BUF_SIZE; 2290 data_len = len; 2291 } else { 2292 len = *size; 2293 data_len = len - ETH_FCS_LEN; 2294 } 2295 dma_dir = page_pool_get_dma_dir(rxq->page_pool); 2296 dma_sync_single_for_cpu(dev->dev.parent, 2297 rx_desc->buf_phys_addr, 2298 len, dma_dir); 2299 rx_desc->buf_phys_addr = 0; 2300 2301 if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { 2302 skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; 2303 2304 skb_frag_off_set(frag, pp->rx_offset_correction); 2305 skb_frag_size_set(frag, data_len); 2306 __skb_frag_set_page(frag, page); 2307 2308 /* last fragment */ 2309 if (len == *size) { 2310 struct skb_shared_info *sinfo; 2311 2312 sinfo = xdp_get_shared_info_from_buff(xdp); 2313 sinfo->nr_frags = xdp_sinfo->nr_frags; 2314 memcpy(sinfo->frags, xdp_sinfo->frags, 2315 sinfo->nr_frags * sizeof(skb_frag_t)); 2316 } 2317 } else { 2318 page_pool_put_full_page(rxq->page_pool, page, true); 2319 } 2320 *size -= len; 2321 } 2322 2323 static struct sk_buff * 2324 mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2325 struct xdp_buff *xdp, u32 desc_status) 2326 { 2327 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2328 int i, num_frags = sinfo->nr_frags; 2329 struct sk_buff *skb; 2330 2331 skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2332 if (!skb) 2333 return ERR_PTR(-ENOMEM); 2334 2335 page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data)); 2336 2337 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2338 skb_put(skb, xdp->data_end - xdp->data); 2339 mvneta_rx_csum(pp, desc_status, skb); 2340 2341 for (i = 0; i < num_frags; i++) { 2342 skb_frag_t *frag = &sinfo->frags[i]; 2343 2344 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2345 skb_frag_page(frag), skb_frag_off(frag), 2346 skb_frag_size(frag), PAGE_SIZE); 2347 page_pool_release_page(rxq->page_pool, skb_frag_page(frag)); 2348 } 2349 2350 return skb; 2351 } 2352 2353 /* Main rx processing when using software buffer management */ 2354 static int mvneta_rx_swbm(struct napi_struct *napi, 2355 struct mvneta_port *pp, int budget, 2356 struct mvneta_rx_queue *rxq) 2357 { 2358 int rx_proc = 0, rx_todo, refill, size = 0; 2359 struct net_device *dev = pp->dev; 2360 struct skb_shared_info sinfo; 2361 struct mvneta_stats ps = {}; 2362 struct bpf_prog *xdp_prog; 2363 u32 desc_status, frame_sz; 2364 struct xdp_buff xdp_buf; 2365 2366 xdp_buf.data_hard_start = NULL; 2367 xdp_buf.frame_sz = PAGE_SIZE; 2368 xdp_buf.rxq = &rxq->xdp_rxq; 2369 2370 sinfo.nr_frags = 0; 2371 2372 /* Get number of received packets */ 2373 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); 2374 2375 rcu_read_lock(); 2376 xdp_prog = READ_ONCE(pp->xdp_prog); 2377 2378 /* Fairness NAPI loop */ 2379 while (rx_proc < budget && rx_proc < rx_todo) { 2380 struct mvneta_rx_desc *rx_desc = 
mvneta_rxq_next_desc_get(rxq); 2381 u32 rx_status, index; 2382 struct sk_buff *skb; 2383 struct page *page; 2384 2385 index = rx_desc - rxq->descs; 2386 page = (struct page *)rxq->buf_virt_addr[index]; 2387 2388 rx_status = rx_desc->status; 2389 rx_proc++; 2390 rxq->refill_num++; 2391 2392 if (rx_status & MVNETA_RXD_FIRST_DESC) { 2393 /* Check errors only for FIRST descriptor */ 2394 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2395 mvneta_rx_error(pp, rx_desc); 2396 goto next; 2397 } 2398 2399 size = rx_desc->data_size; 2400 frame_sz = size - ETH_FCS_LEN; 2401 desc_status = rx_status; 2402 2403 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, 2404 &size, page); 2405 } else { 2406 if (unlikely(!xdp_buf.data_hard_start)) { 2407 rx_desc->buf_phys_addr = 0; 2408 page_pool_put_full_page(rxq->page_pool, page, 2409 true); 2410 goto next; 2411 } 2412 2413 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, 2414 &size, &sinfo, page); 2415 } /* Middle or Last descriptor */ 2416 2417 if (!(rx_status & MVNETA_RXD_LAST_DESC)) 2418 /* no last descriptor this time */ 2419 continue; 2420 2421 if (size) { 2422 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2423 goto next; 2424 } 2425 2426 if (xdp_prog && 2427 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) 2428 goto next; 2429 2430 skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); 2431 if (IS_ERR(skb)) { 2432 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2433 2434 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2435 2436 u64_stats_update_begin(&stats->syncp); 2437 stats->es.skb_alloc_error++; 2438 stats->rx_dropped++; 2439 u64_stats_update_end(&stats->syncp); 2440 2441 goto next; 2442 } 2443 2444 ps.rx_bytes += skb->len; 2445 ps.rx_packets++; 2446 2447 skb->protocol = eth_type_trans(skb, dev); 2448 napi_gro_receive(napi, skb); 2449 next: 2450 xdp_buf.data_hard_start = NULL; 2451 sinfo.nr_frags = 0; 2452 } 2453 rcu_read_unlock(); 2454 2455 if (xdp_buf.data_hard_start) 2456 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); 2457 2458 if (ps.xdp_redirect) 2459 xdp_do_flush_map(); 2460 2461 if (ps.rx_packets) 2462 mvneta_update_stats(pp, &ps); 2463 2464 /* return some buffers to hardware queue, one at a time is too slow */ 2465 refill = mvneta_rx_refill_queue(pp, rxq); 2466 2467 /* Update rxq management counters */ 2468 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); 2469 2470 return ps.rx_packets; 2471 } 2472 2473 /* Main rx processing when using hardware buffer management */ 2474 static int mvneta_rx_hwbm(struct napi_struct *napi, 2475 struct mvneta_port *pp, int rx_todo, 2476 struct mvneta_rx_queue *rxq) 2477 { 2478 struct net_device *dev = pp->dev; 2479 int rx_done; 2480 u32 rcvd_pkts = 0; 2481 u32 rcvd_bytes = 0; 2482 2483 /* Get number of received packets */ 2484 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 2485 2486 if (rx_todo > rx_done) 2487 rx_todo = rx_done; 2488 2489 rx_done = 0; 2490 2491 /* Fairness NAPI loop */ 2492 while (rx_done < rx_todo) { 2493 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 2494 struct mvneta_bm_pool *bm_pool = NULL; 2495 struct sk_buff *skb; 2496 unsigned char *data; 2497 dma_addr_t phys_addr; 2498 u32 rx_status, frag_size; 2499 int rx_bytes, err; 2500 u8 pool_id; 2501 2502 rx_done++; 2503 rx_status = rx_desc->status; 2504 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 2505 data = (u8 *)(uintptr_t)rx_desc->buf_cookie; 2506 phys_addr = rx_desc->buf_phys_addr; 2507 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); 2508 bm_pool = 
&pp->bm_priv->bm_pools[pool_id]; 2509 2510 if (!mvneta_rxq_desc_is_first_last(rx_status) || 2511 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 2512 err_drop_frame_ret_pool: 2513 /* Return the buffer to the pool */ 2514 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2515 rx_desc->buf_phys_addr); 2516 err_drop_frame: 2517 mvneta_rx_error(pp, rx_desc); 2518 /* leave the descriptor untouched */ 2519 continue; 2520 } 2521 2522 if (rx_bytes <= rx_copybreak) { 2523 /* better copy a small frame and not unmap the DMA region */ 2524 skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 2525 if (unlikely(!skb)) 2526 goto err_drop_frame_ret_pool; 2527 2528 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, 2529 rx_desc->buf_phys_addr, 2530 MVNETA_MH_SIZE + NET_SKB_PAD, 2531 rx_bytes, 2532 DMA_FROM_DEVICE); 2533 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, 2534 rx_bytes); 2535 2536 skb->protocol = eth_type_trans(skb, dev); 2537 mvneta_rx_csum(pp, rx_status, skb); 2538 napi_gro_receive(napi, skb); 2539 2540 rcvd_pkts++; 2541 rcvd_bytes += rx_bytes; 2542 2543 /* Return the buffer to the pool */ 2544 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2545 rx_desc->buf_phys_addr); 2546 2547 /* leave the descriptor and buffer untouched */ 2548 continue; 2549 } 2550 2551 /* Refill processing */ 2552 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); 2553 if (err) { 2554 struct mvneta_pcpu_stats *stats; 2555 2556 netdev_err(dev, "Linux processing - Can't refill\n"); 2557 2558 stats = this_cpu_ptr(pp->stats); 2559 u64_stats_update_begin(&stats->syncp); 2560 stats->es.refill_error++; 2561 u64_stats_update_end(&stats->syncp); 2562 2563 goto err_drop_frame_ret_pool; 2564 } 2565 2566 frag_size = bm_pool->hwbm_pool.frag_size; 2567 2568 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); 2569 2570 /* After the refill, the old buffer has to be unmapped regardless of 2571 * whether the skb was successfully built or not.
2572 */ 2573 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, 2574 bm_pool->buf_size, DMA_FROM_DEVICE); 2575 if (!skb) 2576 goto err_drop_frame; 2577 2578 rcvd_pkts++; 2579 rcvd_bytes += rx_bytes; 2580 2581 /* Linux processing */ 2582 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 2583 skb_put(skb, rx_bytes); 2584 2585 skb->protocol = eth_type_trans(skb, dev); 2586 2587 mvneta_rx_csum(pp, rx_status, skb); 2588 2589 napi_gro_receive(napi, skb); 2590 } 2591 2592 if (rcvd_pkts) { 2593 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2594 2595 u64_stats_update_begin(&stats->syncp); 2596 stats->es.ps.rx_packets += rcvd_pkts; 2597 stats->es.ps.rx_bytes += rcvd_bytes; 2598 u64_stats_update_end(&stats->syncp); 2599 } 2600 2601 /* Update rxq management counters */ 2602 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 2603 2604 return rx_done; 2605 } 2606 2607 static inline void 2608 mvneta_tso_put_hdr(struct sk_buff *skb, 2609 struct mvneta_port *pp, struct mvneta_tx_queue *txq) 2610 { 2611 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2612 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2613 struct mvneta_tx_desc *tx_desc; 2614 2615 tx_desc = mvneta_txq_next_desc_get(txq); 2616 tx_desc->data_size = hdr_len; 2617 tx_desc->command = mvneta_skb_tx_csum(pp, skb); 2618 tx_desc->command |= MVNETA_TXD_F_DESC; 2619 tx_desc->buf_phys_addr = txq->tso_hdrs_phys + 2620 txq->txq_put_index * TSO_HEADER_SIZE; 2621 buf->type = MVNETA_TYPE_SKB; 2622 buf->skb = NULL; 2623 2624 mvneta_txq_inc_put(txq); 2625 } 2626 2627 static inline int 2628 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, 2629 struct sk_buff *skb, char *data, int size, 2630 bool last_tcp, bool is_last) 2631 { 2632 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2633 struct mvneta_tx_desc *tx_desc; 2634 2635 tx_desc = mvneta_txq_next_desc_get(txq); 2636 tx_desc->data_size = size; 2637 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, 2638 size, DMA_TO_DEVICE); 2639 if (unlikely(dma_mapping_error(dev->dev.parent, 2640 tx_desc->buf_phys_addr))) { 2641 mvneta_txq_desc_put(txq); 2642 return -ENOMEM; 2643 } 2644 2645 tx_desc->command = 0; 2646 buf->type = MVNETA_TYPE_SKB; 2647 buf->skb = NULL; 2648 2649 if (last_tcp) { 2650 /* last descriptor in the TCP packet */ 2651 tx_desc->command = MVNETA_TXD_L_DESC; 2652 2653 /* last descriptor in SKB */ 2654 if (is_last) 2655 buf->skb = skb; 2656 } 2657 mvneta_txq_inc_put(txq); 2658 return 0; 2659 } 2660 2661 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 2662 struct mvneta_tx_queue *txq) 2663 { 2664 int hdr_len, total_len, data_left; 2665 int desc_count = 0; 2666 struct mvneta_port *pp = netdev_priv(dev); 2667 struct tso_t tso; 2668 int i; 2669 2670 /* Count needed descriptors */ 2671 if ((txq->count + tso_count_descs(skb)) >= txq->size) 2672 return 0; 2673 2674 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { 2675 pr_info("*** Is this even possible???!?!?\n"); 2676 return 0; 2677 } 2678 2679 /* Initialize the TSO handler, and prepare the first payload */ 2680 hdr_len = tso_start(skb, &tso); 2681 2682 total_len = skb->len - hdr_len; 2683 while (total_len > 0) { 2684 char *hdr; 2685 2686 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 2687 total_len -= data_left; 2688 desc_count++; 2689 2690 /* prepare packet headers: MAC + IP + TCP */ 2691 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; 2692 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 
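		/* The rebuilt header is sent in its own descriptor, backed by the
		 * per-queue tso_hdrs DMA area (mvneta_tso_put_hdr() call below); the
		 * payload of this segment is then split into one data descriptor per
		 * chunk by mvneta_tso_put_data() in the inner loop.
		 */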
2693 2694 mvneta_tso_put_hdr(skb, pp, txq); 2695 2696 while (data_left > 0) { 2697 int size; 2698 desc_count++; 2699 2700 size = min_t(int, tso.size, data_left); 2701 2702 if (mvneta_tso_put_data(dev, txq, skb, 2703 tso.data, size, 2704 size == data_left, 2705 total_len == 0)) 2706 goto err_release; 2707 data_left -= size; 2708 2709 tso_build_data(skb, &tso, size); 2710 } 2711 } 2712 2713 return desc_count; 2714 2715 err_release: 2716 /* Release all used data descriptors; header descriptors must not 2717 * be DMA-unmapped. 2718 */ 2719 for (i = desc_count - 1; i >= 0; i--) { 2720 struct mvneta_tx_desc *tx_desc = txq->descs + i; 2721 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) 2722 dma_unmap_single(pp->dev->dev.parent, 2723 tx_desc->buf_phys_addr, 2724 tx_desc->data_size, 2725 DMA_TO_DEVICE); 2726 mvneta_txq_desc_put(txq); 2727 } 2728 return 0; 2729 } 2730 2731 /* Handle tx fragmentation processing */ 2732 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 2733 struct mvneta_tx_queue *txq) 2734 { 2735 struct mvneta_tx_desc *tx_desc; 2736 int i, nr_frags = skb_shinfo(skb)->nr_frags; 2737 2738 for (i = 0; i < nr_frags; i++) { 2739 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2741 void *addr = skb_frag_address(frag); 2742 2743 tx_desc = mvneta_txq_next_desc_get(txq); 2744 tx_desc->data_size = skb_frag_size(frag); 2745 2746 tx_desc->buf_phys_addr = 2747 dma_map_single(pp->dev->dev.parent, addr, 2748 tx_desc->data_size, DMA_TO_DEVICE); 2749 2750 if (dma_mapping_error(pp->dev->dev.parent, 2751 tx_desc->buf_phys_addr)) { 2752 mvneta_txq_desc_put(txq); 2753 goto error; 2754 } 2755 2756 if (i == nr_frags - 1) { 2757 /* Last descriptor */ 2758 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 2759 buf->skb = skb; 2760 } else { 2761 /* Descriptor in the middle: Not First, Not Last */ 2762 tx_desc->command = 0; 2763 buf->skb = NULL; 2764 } 2765 buf->type = MVNETA_TYPE_SKB; 2766 mvneta_txq_inc_put(txq); 2767 } 2768 2769 return 0; 2770 2771 error: 2772 /* Release all descriptors that were used to map fragments of 2773 * this packet, as well as the corresponding DMA mappings 2774 */ 2775 for (i = i - 1; i >= 0; i--) { 2776 tx_desc = txq->descs + i; 2777 dma_unmap_single(pp->dev->dev.parent, 2778 tx_desc->buf_phys_addr, 2779 tx_desc->data_size, 2780 DMA_TO_DEVICE); 2781 mvneta_txq_desc_put(txq); 2782 } 2783 2784 return -ENOMEM; 2785 } 2786 2787 /* Main tx processing */ 2788 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) 2789 { 2790 struct mvneta_port *pp = netdev_priv(dev); 2791 u16 txq_id = skb_get_queue_mapping(skb); 2792 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 2793 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; 2794 struct mvneta_tx_desc *tx_desc; 2795 int len = skb->len; 2796 int frags = 0; 2797 u32 tx_cmd; 2798 2799 if (!netif_running(dev)) 2800 goto out; 2801 2802 if (skb_is_gso(skb)) { 2803 frags = mvneta_tx_tso(skb, dev, txq); 2804 goto out; 2805 } 2806 2807 frags = skb_shinfo(skb)->nr_frags + 1; 2808 2809 /* Get a descriptor for the first part of the packet */ 2810 tx_desc = mvneta_txq_next_desc_get(txq); 2811 2812 tx_cmd = mvneta_skb_tx_csum(pp, skb); 2813 2814 tx_desc->data_size = skb_headlen(skb); 2815 2816 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 2817 tx_desc->data_size, 2818 DMA_TO_DEVICE); 2819 if (unlikely(dma_mapping_error(dev->dev.parent, 2820 tx_desc->buf_phys_addr))) { 2821 mvneta_txq_desc_put(txq); 2822 
frags = 0; 2823 goto out; 2824 } 2825 2826 buf->type = MVNETA_TYPE_SKB; 2827 if (frags == 1) { 2828 /* First and Last descriptor */ 2829 tx_cmd |= MVNETA_TXD_FLZ_DESC; 2830 tx_desc->command = tx_cmd; 2831 buf->skb = skb; 2832 mvneta_txq_inc_put(txq); 2833 } else { 2834 /* First but not Last */ 2835 tx_cmd |= MVNETA_TXD_F_DESC; 2836 buf->skb = NULL; 2837 mvneta_txq_inc_put(txq); 2838 tx_desc->command = tx_cmd; 2839 /* Continue with other skb fragments */ 2840 if (mvneta_tx_frag_process(pp, skb, txq)) { 2841 dma_unmap_single(dev->dev.parent, 2842 tx_desc->buf_phys_addr, 2843 tx_desc->data_size, 2844 DMA_TO_DEVICE); 2845 mvneta_txq_desc_put(txq); 2846 frags = 0; 2847 goto out; 2848 } 2849 } 2850 2851 out: 2852 if (frags > 0) { 2853 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 2854 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2855 2856 netdev_tx_sent_queue(nq, len); 2857 2858 txq->count += frags; 2859 if (txq->count >= txq->tx_stop_threshold) 2860 netif_tx_stop_queue(nq); 2861 2862 if (!netdev_xmit_more() || netif_xmit_stopped(nq) || 2863 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) 2864 mvneta_txq_pend_desc_add(pp, txq, frags); 2865 else 2866 txq->pending += frags; 2867 2868 u64_stats_update_begin(&stats->syncp); 2869 stats->es.ps.tx_bytes += len; 2870 stats->es.ps.tx_packets++; 2871 u64_stats_update_end(&stats->syncp); 2872 } else { 2873 dev->stats.tx_dropped++; 2874 dev_kfree_skb_any(skb); 2875 } 2876 2877 return NETDEV_TX_OK; 2878 } 2879 2880 2881 /* Free tx resources, when resetting a port */ 2882 static void mvneta_txq_done_force(struct mvneta_port *pp, 2883 struct mvneta_tx_queue *txq) 2884 2885 { 2886 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 2887 int tx_done = txq->count; 2888 2889 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); 2890 2891 /* reset txq */ 2892 txq->count = 0; 2893 txq->txq_put_index = 0; 2894 txq->txq_get_index = 0; 2895 } 2896 2897 /* Handle tx done - called in softirq context. The <cause_tx_done> argument 2898 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 2899 */ 2900 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 2901 { 2902 struct mvneta_tx_queue *txq; 2903 struct netdev_queue *nq; 2904 int cpu = smp_processor_id(); 2905 2906 while (cause_tx_done) { 2907 txq = mvneta_tx_done_policy(pp, cause_tx_done); 2908 2909 nq = netdev_get_tx_queue(pp->dev, txq->id); 2910 __netif_tx_lock(nq, cpu); 2911 2912 if (txq->count) 2913 mvneta_txq_done(pp, txq); 2914 2915 __netif_tx_unlock(nq); 2916 cause_tx_done &= ~((1 << txq->id)); 2917 } 2918 } 2919 2920 /* Compute crc8 of the specified address, using a unique algorithm , 2921 * according to hw spec, different than generic crc8 algorithm 2922 */ 2923 static int mvneta_addr_crc(unsigned char *addr) 2924 { 2925 int crc = 0; 2926 int i; 2927 2928 for (i = 0; i < ETH_ALEN; i++) { 2929 int j; 2930 2931 crc = (crc ^ addr[i]) << 8; 2932 for (j = 7; j >= 0; j--) { 2933 if (crc & (0x100 << j)) 2934 crc ^= 0x107 << j; 2935 } 2936 } 2937 2938 return crc; 2939 } 2940 2941 /* This method controls the net device special MAC multicast support. 2942 * The Special Multicast Table for MAC addresses supports MAC of the form 2943 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 2944 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 2945 * Table entries in the DA-Filter table. This method set the Special 2946 * Multicast Table appropriate entry. 
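 * For example, a DA of 01:00:5e:00:00:2a (last byte 0x2a) selects SMC table
 * register 0x2a / 4 == 10 and byte 0x2a % 4 == 2 within that register.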
2947 */ 2948 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 2949 unsigned char last_byte, 2950 int queue) 2951 { 2952 unsigned int smc_table_reg; 2953 unsigned int tbl_offset; 2954 unsigned int reg_offset; 2955 2956 /* Register offset from SMC table base */ 2957 tbl_offset = (last_byte / 4); 2958 /* Entry offset within the above reg */ 2959 reg_offset = last_byte % 4; 2960 2961 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 2962 + tbl_offset * 4)); 2963 2964 if (queue == -1) 2965 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2966 else { 2967 smc_table_reg &= ~(0xff << (8 * reg_offset)); 2968 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 2969 } 2970 2971 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 2972 smc_table_reg); 2973 } 2974 2975 /* This method controls the network device Other MAC multicast support. 2976 * The Other Multicast Table is used for multicast of another type. 2977 * A CRC-8 is used as an index to the Other Multicast Table entries 2978 * in the DA-Filter table. 2979 * The method gets the CRC-8 value from the calling routine and 2980 * sets the appropriate Other Multicast Table entry according to the 2981 * specified CRC-8. 2982 */ 2983 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 2984 unsigned char crc8, 2985 int queue) 2986 { 2987 unsigned int omc_table_reg; 2988 unsigned int tbl_offset; 2989 unsigned int reg_offset; 2990 2991 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 2992 reg_offset = crc8 % 4; /* Entry offset within the above reg */ 2993 2994 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 2995 2996 if (queue == -1) { 2997 /* Clear the accept frame bit at the specified Other DA table entry */ 2998 omc_table_reg &= ~(0xff << (8 * reg_offset)); 2999 } else { 3000 omc_table_reg &= ~(0xff << (8 * reg_offset)); 3001 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 3002 } 3003 3004 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 3005 } 3006 3007 /* The network device supports multicast using two tables: 3008 * 1) Special Multicast Table for MAC addresses of the form 3009 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 3010 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 3011 * Table entries in the DA-Filter table. 3012 * 2) Other Multicast Table for multicast of another type. A CRC-8 value 3013 * is used as an index to the Other Multicast Table entries in the 3014 * DA-Filter table.
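 * For example, 01:00:5e:00:00:2a is programmed through entry 0x2a of the
 * Special table, while an address such as 33:33:00:00:00:01 (IPv6 all-nodes
 * multicast) does not match the 01:00:5e:00:00:XX prefix and is hashed with
 * mvneta_addr_crc() into the Other table.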
3015 */ 3016 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, 3017 int queue) 3018 { 3019 unsigned char crc_result = 0; 3020 3021 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { 3022 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); 3023 return 0; 3024 } 3025 3026 crc_result = mvneta_addr_crc(p_addr); 3027 if (queue == -1) { 3028 if (pp->mcast_count[crc_result] == 0) { 3029 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", 3030 crc_result); 3031 return -EINVAL; 3032 } 3033 3034 pp->mcast_count[crc_result]--; 3035 if (pp->mcast_count[crc_result] != 0) { 3036 netdev_info(pp->dev, 3037 "After delete there are %d valid Mcast for crc8=0x%02x\n", 3038 pp->mcast_count[crc_result], crc_result); 3039 return -EINVAL; 3040 } 3041 } else 3042 pp->mcast_count[crc_result]++; 3043 3044 mvneta_set_other_mcast_addr(pp, crc_result, queue); 3045 3046 return 0; 3047 } 3048 3049 /* Configure Filtering mode of Ethernet port */ 3050 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, 3051 int is_promisc) 3052 { 3053 u32 port_cfg_reg, val; 3054 3055 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); 3056 3057 val = mvreg_read(pp, MVNETA_TYPE_PRIO); 3058 3059 /* Set / Clear UPM bit in port configuration register */ 3060 if (is_promisc) { 3061 /* Accept all Unicast addresses */ 3062 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; 3063 val |= MVNETA_FORCE_UNI; 3064 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); 3065 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); 3066 } else { 3067 /* Reject all Unicast addresses */ 3068 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; 3069 val &= ~MVNETA_FORCE_UNI; 3070 } 3071 3072 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); 3073 mvreg_write(pp, MVNETA_TYPE_PRIO, val); 3074 } 3075 3076 /* register unicast and multicast addresses */ 3077 static void mvneta_set_rx_mode(struct net_device *dev) 3078 { 3079 struct mvneta_port *pp = netdev_priv(dev); 3080 struct netdev_hw_addr *ha; 3081 3082 if (dev->flags & IFF_PROMISC) { 3083 /* Accept all: Multicast + Unicast */ 3084 mvneta_rx_unicast_promisc_set(pp, 1); 3085 mvneta_set_ucast_table(pp, pp->rxq_def); 3086 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3087 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3088 } else { 3089 /* Accept single Unicast */ 3090 mvneta_rx_unicast_promisc_set(pp, 0); 3091 mvneta_set_ucast_table(pp, -1); 3092 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); 3093 3094 if (dev->flags & IFF_ALLMULTI) { 3095 /* Accept all multicast */ 3096 mvneta_set_special_mcast_table(pp, pp->rxq_def); 3097 mvneta_set_other_mcast_table(pp, pp->rxq_def); 3098 } else { 3099 /* Accept only initialized multicast */ 3100 mvneta_set_special_mcast_table(pp, -1); 3101 mvneta_set_other_mcast_table(pp, -1); 3102 3103 if (!netdev_mc_empty(dev)) { 3104 netdev_for_each_mc_addr(ha, dev) { 3105 mvneta_mcast_addr_set(pp, ha->addr, 3106 pp->rxq_def); 3107 } 3108 } 3109 } 3110 } 3111 } 3112 3113 /* Interrupt handling - the callback for request_irq() */ 3114 static irqreturn_t mvneta_isr(int irq, void *dev_id) 3115 { 3116 struct mvneta_port *pp = (struct mvneta_port *)dev_id; 3117 3118 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 3119 napi_schedule(&pp->napi); 3120 3121 return IRQ_HANDLED; 3122 } 3123 3124 /* Interrupt handling - the callback for request_percpu_irq() */ 3125 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) 3126 { 3127 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; 3128 3129 disable_percpu_irq(port->pp->dev->irq); 3130 napi_schedule(&port->napi);
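	/* The per-CPU interrupt stays disabled here; mvneta_poll() re-enables it
	 * via enable_percpu_irq() once a poll completes under budget.
	 */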
3131 3132 return IRQ_HANDLED; 3133 } 3134 3135 static void mvneta_link_change(struct mvneta_port *pp) 3136 { 3137 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3138 3139 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); 3140 } 3141 3142 /* NAPI handler 3143 * Bits 0 - 7 of the causeRxTx register indicate that packets were 3144 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1). 3145 * Bits 8 - 15 of the causeRxTx register indicate that packets were 3146 * received on the corresponding RXQ (Bit 8 is for RX queue 0). 3147 * Each CPU has its own causeRxTx register. 3148 */ 3149 static int mvneta_poll(struct napi_struct *napi, int budget) 3150 { 3151 int rx_done = 0; 3152 u32 cause_rx_tx; 3153 int rx_queue; 3154 struct mvneta_port *pp = netdev_priv(napi->dev); 3155 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); 3156 3157 if (!netif_running(pp->dev)) { 3158 napi_complete(napi); 3159 return rx_done; 3160 } 3161 3162 /* Read cause register */ 3163 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 3164 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 3165 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 3166 3167 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 3168 3169 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | 3170 MVNETA_CAUSE_LINK_CHANGE)) 3171 mvneta_link_change(pp); 3172 } 3173 3174 /* Release Tx descriptors */ 3175 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 3176 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 3177 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 3178 } 3179 3180 /* For the case where the last mvneta_poll did not process all 3181 * RX packets 3182 */ 3183 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : 3184 port->cause_rx_tx; 3185 3186 rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); 3187 if (rx_queue) { 3188 rx_queue = rx_queue - 1; 3189 if (pp->bm_priv) 3190 rx_done = mvneta_rx_hwbm(napi, pp, budget, 3191 &pp->rxqs[rx_queue]); 3192 else 3193 rx_done = mvneta_rx_swbm(napi, pp, budget, 3194 &pp->rxqs[rx_queue]); 3195 } 3196 3197 if (rx_done < budget) { 3198 cause_rx_tx = 0; 3199 napi_complete_done(napi, rx_done); 3200 3201 if (pp->neta_armada3700) { 3202 unsigned long flags; 3203 3204 local_irq_save(flags); 3205 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 3206 MVNETA_RX_INTR_MASK(rxq_number) | 3207 MVNETA_TX_INTR_MASK(txq_number) | 3208 MVNETA_MISCINTR_INTR_MASK); 3209 local_irq_restore(flags); 3210 } else { 3211 enable_percpu_irq(pp->dev->irq, 0); 3212 } 3213 } 3214 3215 if (pp->neta_armada3700) 3216 pp->cause_rx_tx = cause_rx_tx; 3217 else 3218 port->cause_rx_tx = cause_rx_tx; 3219 3220 return rx_done; 3221 } 3222 3223 static int mvneta_create_page_pool(struct mvneta_port *pp, 3224 struct mvneta_rx_queue *rxq, int size) 3225 { 3226 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); 3227 struct page_pool_params pp_params = { 3228 .order = 0, 3229 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 3230 .pool_size = size, 3231 .nid = NUMA_NO_NODE, 3232 .dev = pp->dev->dev.parent, 3233 .dma_dir = xdp_prog ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 3234 .offset = pp->rx_offset_correction, 3235 .max_len = MVNETA_MAX_RX_BUF_SIZE, 3236 }; 3237 int err; 3238 3239 rxq->page_pool = page_pool_create(&pp_params); 3240 if (IS_ERR(rxq->page_pool)) { 3241 err = PTR_ERR(rxq->page_pool); 3242 rxq->page_pool = NULL; 3243 return err; 3244 } 3245 3246 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); 3247 if (err < 0) 3248 goto err_free_pp; 3249 3250 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 3251 rxq->page_pool); 3252 if (err) 3253 goto err_unregister_rxq; 3254 3255 return 0; 3256 3257 err_unregister_rxq: 3258 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3259 err_free_pp: 3260 page_pool_destroy(rxq->page_pool); 3261 rxq->page_pool = NULL; 3262 return err; 3263 } 3264 3265 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 3266 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 3267 int num) 3268 { 3269 int i, err; 3270 3271 err = mvneta_create_page_pool(pp, rxq, num); 3272 if (err < 0) 3273 return err; 3274 3275 for (i = 0; i < num; i++) { 3276 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 3277 if (mvneta_rx_refill(pp, rxq->descs + i, rxq, 3278 GFP_KERNEL) != 0) { 3279 netdev_err(pp->dev, 3280 "%s:rxq %d, %d of %d buffs filled\n", 3281 __func__, rxq->id, i, num); 3282 break; 3283 } 3284 } 3285 3286 /* Add this number of RX descriptors as non occupied (ready to 3287 * get packets) 3288 */ 3289 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 3290 3291 return i; 3292 } 3293 3294 /* Free all packets pending transmit from all TXQs and reset TX port */ 3295 static void mvneta_tx_reset(struct mvneta_port *pp) 3296 { 3297 int queue; 3298 3299 /* free the skb's in the tx ring */ 3300 for (queue = 0; queue < txq_number; queue++) 3301 mvneta_txq_done_force(pp, &pp->txqs[queue]); 3302 3303 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 3304 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 3305 } 3306 3307 static void mvneta_rx_reset(struct mvneta_port *pp) 3308 { 3309 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 3310 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 3311 } 3312 3313 /* Rx/Tx queue initialization/cleanup methods */ 3314 3315 static int mvneta_rxq_sw_init(struct mvneta_port *pp, 3316 struct mvneta_rx_queue *rxq) 3317 { 3318 rxq->size = pp->rx_ring_size; 3319 3320 /* Allocate memory for RX descriptors */ 3321 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3322 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3323 &rxq->descs_phys, GFP_KERNEL); 3324 if (!rxq->descs) 3325 return -ENOMEM; 3326 3327 rxq->last_desc = rxq->size - 1; 3328 3329 return 0; 3330 } 3331 3332 static void mvneta_rxq_hw_init(struct mvneta_port *pp, 3333 struct mvneta_rx_queue *rxq) 3334 { 3335 /* Set Rx descriptors queue starting address */ 3336 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 3337 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 3338 3339 /* Set coalescing pkts and time */ 3340 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 3341 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 3342 3343 if (!pp->bm_priv) { 3344 /* Set Offset */ 3345 mvneta_rxq_offset_set(pp, rxq, 0); 3346 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
3347 MVNETA_MAX_RX_BUF_SIZE : 3348 MVNETA_RX_BUF_SIZE(pp->pkt_size)); 3349 mvneta_rxq_bm_disable(pp, rxq); 3350 mvneta_rxq_fill(pp, rxq, rxq->size); 3351 } else { 3352 /* Set Offset */ 3353 mvneta_rxq_offset_set(pp, rxq, 3354 NET_SKB_PAD - pp->rx_offset_correction); 3355 3356 mvneta_rxq_bm_enable(pp, rxq); 3357 /* Fill RXQ with buffers from RX pool */ 3358 mvneta_rxq_long_pool_set(pp, rxq); 3359 mvneta_rxq_short_pool_set(pp, rxq); 3360 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); 3361 } 3362 } 3363 3364 /* Create a specified RX queue */ 3365 static int mvneta_rxq_init(struct mvneta_port *pp, 3366 struct mvneta_rx_queue *rxq) 3367 3368 { 3369 int ret; 3370 3371 ret = mvneta_rxq_sw_init(pp, rxq); 3372 if (ret < 0) 3373 return ret; 3374 3375 mvneta_rxq_hw_init(pp, rxq); 3376 3377 return 0; 3378 } 3379 3380 /* Cleanup Rx queue */ 3381 static void mvneta_rxq_deinit(struct mvneta_port *pp, 3382 struct mvneta_rx_queue *rxq) 3383 { 3384 mvneta_rxq_drop_pkts(pp, rxq); 3385 3386 if (rxq->descs) 3387 dma_free_coherent(pp->dev->dev.parent, 3388 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 3389 rxq->descs, 3390 rxq->descs_phys); 3391 3392 rxq->descs = NULL; 3393 rxq->last_desc = 0; 3394 rxq->next_desc_to_proc = 0; 3395 rxq->descs_phys = 0; 3396 rxq->first_to_refill = 0; 3397 rxq->refill_num = 0; 3398 } 3399 3400 static int mvneta_txq_sw_init(struct mvneta_port *pp, 3401 struct mvneta_tx_queue *txq) 3402 { 3403 int cpu; 3404 3405 txq->size = pp->tx_ring_size; 3406 3407 /* A queue must always have room for at least one skb. 3408 * Therefore, stop the queue when the free entries reaches 3409 * the maximum number of descriptors per skb. 3410 */ 3411 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 3412 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 3413 3414 /* Allocate memory for TX descriptors */ 3415 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 3416 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3417 &txq->descs_phys, GFP_KERNEL); 3418 if (!txq->descs) 3419 return -ENOMEM; 3420 3421 txq->last_desc = txq->size - 1; 3422 3423 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); 3424 if (!txq->buf) 3425 return -ENOMEM; 3426 3427 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 3428 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, 3429 txq->size * TSO_HEADER_SIZE, 3430 &txq->tso_hdrs_phys, GFP_KERNEL); 3431 if (!txq->tso_hdrs) 3432 return -ENOMEM; 3433 3434 /* Setup XPS mapping */ 3435 if (txq_number > 1) 3436 cpu = txq->id % num_present_cpus(); 3437 else 3438 cpu = pp->rxq_def % num_present_cpus(); 3439 cpumask_set_cpu(cpu, &txq->affinity_mask); 3440 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); 3441 3442 return 0; 3443 } 3444 3445 static void mvneta_txq_hw_init(struct mvneta_port *pp, 3446 struct mvneta_tx_queue *txq) 3447 { 3448 /* Set maximum bandwidth for enabled TXQs */ 3449 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 3450 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 3451 3452 /* Set Tx descriptors queue starting address */ 3453 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 3454 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 3455 3456 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 3457 } 3458 3459 /* Create and initialize a tx queue */ 3460 static int mvneta_txq_init(struct mvneta_port *pp, 3461 struct mvneta_tx_queue *txq) 3462 { 3463 int ret; 3464 3465 ret = mvneta_txq_sw_init(pp, txq); 3466 if (ret < 0) 3467 return ret; 3468 3469 mvneta_txq_hw_init(pp, txq); 
3470 3471 return 0; 3472 } 3473 3474 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 3475 static void mvneta_txq_sw_deinit(struct mvneta_port *pp, 3476 struct mvneta_tx_queue *txq) 3477 { 3478 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 3479 3480 kfree(txq->buf); 3481 3482 if (txq->tso_hdrs) 3483 dma_free_coherent(pp->dev->dev.parent, 3484 txq->size * TSO_HEADER_SIZE, 3485 txq->tso_hdrs, txq->tso_hdrs_phys); 3486 if (txq->descs) 3487 dma_free_coherent(pp->dev->dev.parent, 3488 txq->size * MVNETA_DESC_ALIGNED_SIZE, 3489 txq->descs, txq->descs_phys); 3490 3491 netdev_tx_reset_queue(nq); 3492 3493 txq->descs = NULL; 3494 txq->last_desc = 0; 3495 txq->next_desc_to_proc = 0; 3496 txq->descs_phys = 0; 3497 } 3498 3499 static void mvneta_txq_hw_deinit(struct mvneta_port *pp, 3500 struct mvneta_tx_queue *txq) 3501 { 3502 /* Set minimum bandwidth for disabled TXQs */ 3503 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 3504 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 3505 3506 /* Set Tx descriptors queue starting address and size */ 3507 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 3508 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 3509 } 3510 3511 static void mvneta_txq_deinit(struct mvneta_port *pp, 3512 struct mvneta_tx_queue *txq) 3513 { 3514 mvneta_txq_sw_deinit(pp, txq); 3515 mvneta_txq_hw_deinit(pp, txq); 3516 } 3517 3518 /* Cleanup all Tx queues */ 3519 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 3520 { 3521 int queue; 3522 3523 for (queue = 0; queue < txq_number; queue++) 3524 mvneta_txq_deinit(pp, &pp->txqs[queue]); 3525 } 3526 3527 /* Cleanup all Rx queues */ 3528 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 3529 { 3530 int queue; 3531 3532 for (queue = 0; queue < rxq_number; queue++) 3533 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 3534 } 3535 3536 3537 /* Init all Rx queues */ 3538 static int mvneta_setup_rxqs(struct mvneta_port *pp) 3539 { 3540 int queue; 3541 3542 for (queue = 0; queue < rxq_number; queue++) { 3543 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 3544 3545 if (err) { 3546 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 3547 __func__, queue); 3548 mvneta_cleanup_rxqs(pp); 3549 return err; 3550 } 3551 } 3552 3553 return 0; 3554 } 3555 3556 /* Init all tx queues */ 3557 static int mvneta_setup_txqs(struct mvneta_port *pp) 3558 { 3559 int queue; 3560 3561 for (queue = 0; queue < txq_number; queue++) { 3562 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 3563 if (err) { 3564 netdev_err(pp->dev, "%s: can't create txq=%d\n", 3565 __func__, queue); 3566 mvneta_cleanup_txqs(pp); 3567 return err; 3568 } 3569 } 3570 3571 return 0; 3572 } 3573 3574 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) 3575 { 3576 int ret; 3577 3578 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); 3579 if (ret) 3580 return ret; 3581 3582 return phy_power_on(pp->comphy); 3583 } 3584 3585 static int mvneta_config_interface(struct mvneta_port *pp, 3586 phy_interface_t interface) 3587 { 3588 int ret = 0; 3589 3590 if (pp->comphy) { 3591 if (interface == PHY_INTERFACE_MODE_SGMII || 3592 interface == PHY_INTERFACE_MODE_1000BASEX || 3593 interface == PHY_INTERFACE_MODE_2500BASEX) { 3594 ret = mvneta_comphy_init(pp, interface); 3595 } 3596 } else { 3597 switch (interface) { 3598 case PHY_INTERFACE_MODE_QSGMII: 3599 mvreg_write(pp, MVNETA_SERDES_CFG, 3600 MVNETA_QSGMII_SERDES_PROTO); 3601 break; 3602 3603 case PHY_INTERFACE_MODE_SGMII: 3604 case 
PHY_INTERFACE_MODE_1000BASEX: 3605 mvreg_write(pp, MVNETA_SERDES_CFG, 3606 MVNETA_SGMII_SERDES_PROTO); 3607 break; 3608 3609 case PHY_INTERFACE_MODE_2500BASEX: 3610 mvreg_write(pp, MVNETA_SERDES_CFG, 3611 MVNETA_HSGMII_SERDES_PROTO); 3612 break; 3613 default: 3614 break; 3615 } 3616 } 3617 3618 pp->phy_interface = interface; 3619 3620 return ret; 3621 } 3622 3623 static void mvneta_start_dev(struct mvneta_port *pp) 3624 { 3625 int cpu; 3626 3627 WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); 3628 3629 mvneta_max_rx_size_set(pp, pp->pkt_size); 3630 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 3631 3632 /* start the Rx/Tx activity */ 3633 mvneta_port_enable(pp); 3634 3635 if (!pp->neta_armada3700) { 3636 /* Enable polling on the port */ 3637 for_each_online_cpu(cpu) { 3638 struct mvneta_pcpu_port *port = 3639 per_cpu_ptr(pp->ports, cpu); 3640 3641 napi_enable(&port->napi); 3642 } 3643 } else { 3644 napi_enable(&pp->napi); 3645 } 3646 3647 /* Unmask interrupts. It has to be done from each CPU */ 3648 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3649 3650 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3651 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3652 MVNETA_CAUSE_LINK_CHANGE); 3653 3654 phylink_start(pp->phylink); 3655 3656 /* We may have called phylink_speed_down before */ 3657 phylink_speed_up(pp->phylink); 3658 3659 netif_tx_start_all_queues(pp->dev); 3660 3661 clear_bit(__MVNETA_DOWN, &pp->state); 3662 } 3663 3664 static void mvneta_stop_dev(struct mvneta_port *pp) 3665 { 3666 unsigned int cpu; 3667 3668 set_bit(__MVNETA_DOWN, &pp->state); 3669 3670 if (device_may_wakeup(&pp->dev->dev)) 3671 phylink_speed_down(pp->phylink, false); 3672 3673 phylink_stop(pp->phylink); 3674 3675 if (!pp->neta_armada3700) { 3676 for_each_online_cpu(cpu) { 3677 struct mvneta_pcpu_port *port = 3678 per_cpu_ptr(pp->ports, cpu); 3679 3680 napi_disable(&port->napi); 3681 } 3682 } else { 3683 napi_disable(&pp->napi); 3684 } 3685 3686 netif_carrier_off(pp->dev); 3687 3688 mvneta_port_down(pp); 3689 netif_tx_stop_all_queues(pp->dev); 3690 3691 /* Stop the port activity */ 3692 mvneta_port_disable(pp); 3693 3694 /* Clear all ethernet port interrupts */ 3695 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); 3696 3697 /* Mask all ethernet port interrupts */ 3698 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3699 3700 mvneta_tx_reset(pp); 3701 mvneta_rx_reset(pp); 3702 3703 WARN_ON(phy_power_off(pp->comphy)); 3704 } 3705 3706 static void mvneta_percpu_enable(void *arg) 3707 { 3708 struct mvneta_port *pp = arg; 3709 3710 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); 3711 } 3712 3713 static void mvneta_percpu_disable(void *arg) 3714 { 3715 struct mvneta_port *pp = arg; 3716 3717 disable_percpu_irq(pp->dev->irq); 3718 } 3719 3720 /* Change the device mtu */ 3721 static int mvneta_change_mtu(struct net_device *dev, int mtu) 3722 { 3723 struct mvneta_port *pp = netdev_priv(dev); 3724 int ret; 3725 3726 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 3727 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 3728 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 3729 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 3730 } 3731 3732 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { 3733 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); 3734 return -EINVAL; 3735 } 3736 3737 dev->mtu = mtu; 3738 3739 if (!netif_running(dev)) { 3740 if (pp->bm_priv) 3741 mvneta_bm_update_mtu(pp, mtu); 3742 3743 netdev_update_features(dev); 3744 return 0; 3745 } 3746 3747 /* The interface is running, so we have to force a 3748 
* reallocation of the queues 3749 */ 3750 mvneta_stop_dev(pp); 3751 on_each_cpu(mvneta_percpu_disable, pp, true); 3752 3753 mvneta_cleanup_txqs(pp); 3754 mvneta_cleanup_rxqs(pp); 3755 3756 if (pp->bm_priv) 3757 mvneta_bm_update_mtu(pp, mtu); 3758 3759 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 3760 3761 ret = mvneta_setup_rxqs(pp); 3762 if (ret) { 3763 netdev_err(dev, "unable to setup rxqs after MTU change\n"); 3764 return ret; 3765 } 3766 3767 ret = mvneta_setup_txqs(pp); 3768 if (ret) { 3769 netdev_err(dev, "unable to setup txqs after MTU change\n"); 3770 return ret; 3771 } 3772 3773 on_each_cpu(mvneta_percpu_enable, pp, true); 3774 mvneta_start_dev(pp); 3775 3776 netdev_update_features(dev); 3777 3778 return 0; 3779 } 3780 3781 static netdev_features_t mvneta_fix_features(struct net_device *dev, 3782 netdev_features_t features) 3783 { 3784 struct mvneta_port *pp = netdev_priv(dev); 3785 3786 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 3787 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 3788 netdev_info(dev, 3789 "Disable IP checksum for MTU greater than %dB\n", 3790 pp->tx_csum_limit); 3791 } 3792 3793 return features; 3794 } 3795 3796 /* Get mac address */ 3797 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 3798 { 3799 u32 mac_addr_l, mac_addr_h; 3800 3801 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 3802 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 3803 addr[0] = (mac_addr_h >> 24) & 0xFF; 3804 addr[1] = (mac_addr_h >> 16) & 0xFF; 3805 addr[2] = (mac_addr_h >> 8) & 0xFF; 3806 addr[3] = mac_addr_h & 0xFF; 3807 addr[4] = (mac_addr_l >> 8) & 0xFF; 3808 addr[5] = mac_addr_l & 0xFF; 3809 } 3810 3811 /* Handle setting mac address */ 3812 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 3813 { 3814 struct mvneta_port *pp = netdev_priv(dev); 3815 struct sockaddr *sockaddr = addr; 3816 int ret; 3817 3818 ret = eth_prepare_mac_addr_change(dev, addr); 3819 if (ret < 0) 3820 return ret; 3821 /* Remove previous address table entry */ 3822 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 3823 3824 /* Set new addr in hw */ 3825 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); 3826 3827 eth_commit_mac_addr_change(dev, addr); 3828 return 0; 3829 } 3830 3831 static void mvneta_validate(struct phylink_config *config, 3832 unsigned long *supported, 3833 struct phylink_link_state *state) 3834 { 3835 struct net_device *ndev = to_net_dev(config->dev); 3836 struct mvneta_port *pp = netdev_priv(ndev); 3837 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 3838 3839 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ 3840 if (state->interface != PHY_INTERFACE_MODE_NA && 3841 state->interface != PHY_INTERFACE_MODE_QSGMII && 3842 state->interface != PHY_INTERFACE_MODE_SGMII && 3843 !phy_interface_mode_is_8023z(state->interface) && 3844 !phy_interface_mode_is_rgmii(state->interface)) { 3845 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 3846 return; 3847 } 3848 3849 /* Allow all the expected bits */ 3850 phylink_set(mask, Autoneg); 3851 phylink_set_port_modes(mask); 3852 3853 /* Asymmetric pause is unsupported */ 3854 phylink_set(mask, Pause); 3855 3856 /* Half-duplex at speeds higher than 100Mbit is unsupported */ 3857 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { 3858 phylink_set(mask, 1000baseT_Full); 3859 phylink_set(mask, 1000baseX_Full); 3860 } 3861 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { 3862 phylink_set(mask, 2500baseT_Full); 3863 phylink_set(mask, 
2500baseX_Full); 3864 } 3865 3866 if (!phy_interface_mode_is_8023z(state->interface)) { 3867 /* 10M and 100M are only supported in non-802.3z mode */ 3868 phylink_set(mask, 10baseT_Half); 3869 phylink_set(mask, 10baseT_Full); 3870 phylink_set(mask, 100baseT_Half); 3871 phylink_set(mask, 100baseT_Full); 3872 } 3873 3874 bitmap_and(supported, supported, mask, 3875 __ETHTOOL_LINK_MODE_MASK_NBITS); 3876 bitmap_and(state->advertising, state->advertising, mask, 3877 __ETHTOOL_LINK_MODE_MASK_NBITS); 3878 3879 /* We can only operate at 2500BaseX or 1000BaseX. If requested 3880 * to advertise both, only report advertising at 2500BaseX. 3881 */ 3882 phylink_helper_basex_speed(state); 3883 } 3884 3885 static void mvneta_mac_pcs_get_state(struct phylink_config *config, 3886 struct phylink_link_state *state) 3887 { 3888 struct net_device *ndev = to_net_dev(config->dev); 3889 struct mvneta_port *pp = netdev_priv(ndev); 3890 u32 gmac_stat; 3891 3892 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 3893 3894 if (gmac_stat & MVNETA_GMAC_SPEED_1000) 3895 state->speed = 3896 state->interface == PHY_INTERFACE_MODE_2500BASEX ? 3897 SPEED_2500 : SPEED_1000; 3898 else if (gmac_stat & MVNETA_GMAC_SPEED_100) 3899 state->speed = SPEED_100; 3900 else 3901 state->speed = SPEED_10; 3902 3903 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); 3904 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 3905 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 3906 3907 state->pause = 0; 3908 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) 3909 state->pause |= MLO_PAUSE_RX; 3910 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) 3911 state->pause |= MLO_PAUSE_TX; 3912 } 3913 3914 static void mvneta_mac_an_restart(struct phylink_config *config) 3915 { 3916 struct net_device *ndev = to_net_dev(config->dev); 3917 struct mvneta_port *pp = netdev_priv(ndev); 3918 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3919 3920 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3921 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); 3922 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 3923 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); 3924 } 3925 3926 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, 3927 const struct phylink_link_state *state) 3928 { 3929 struct net_device *ndev = to_net_dev(config->dev); 3930 struct mvneta_port *pp = netdev_priv(ndev); 3931 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); 3932 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 3933 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); 3934 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 3935 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 3936 3937 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; 3938 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | 3939 MVNETA_GMAC2_PORT_RESET); 3940 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); 3941 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; 3942 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | 3943 MVNETA_GMAC_INBAND_RESTART_AN | 3944 MVNETA_GMAC_AN_SPEED_EN | 3945 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | 3946 MVNETA_GMAC_AN_FLOW_CTRL_EN | 3947 MVNETA_GMAC_AN_DUPLEX_EN); 3948 3949 /* Even though it might look weird, when we're configured in 3950 * SGMII or QSGMII mode, the RGMII bit needs to be set. 
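*
* A rough map of what the bit operations below produce (derived from
* this function, not from the Marvell datasheet): of the bits touched
* here, the RGMII modes only get MVNETA_GMAC2_PORT_RGMII, while SGMII,
* QSGMII and the 802.3z modes (1000BASE-X/2500BASE-X) additionally
* have MVNETA_GMAC2_PCS_ENABLE set, since they run through the
* internal PCS.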
3951 */ 3952 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; 3953 3954 if (state->interface == PHY_INTERFACE_MODE_QSGMII || 3955 state->interface == PHY_INTERFACE_MODE_SGMII || 3956 phy_interface_mode_is_8023z(state->interface)) 3957 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; 3958 3959 if (phylink_test(state->advertising, Pause)) 3960 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; 3961 3962 if (!phylink_autoneg_inband(mode)) { 3963 /* Phy or fixed speed - nothing to do, leave the 3964 * configured speed, duplex and flow control as-is. 3965 */ 3966 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 3967 /* SGMII mode receives the state from the PHY */ 3968 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; 3969 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; 3970 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | 3971 MVNETA_GMAC_FORCE_LINK_PASS | 3972 MVNETA_GMAC_CONFIG_MII_SPEED | 3973 MVNETA_GMAC_CONFIG_GMII_SPEED | 3974 MVNETA_GMAC_CONFIG_FULL_DUPLEX)) | 3975 MVNETA_GMAC_INBAND_AN_ENABLE | 3976 MVNETA_GMAC_AN_SPEED_EN | 3977 MVNETA_GMAC_AN_DUPLEX_EN; 3978 } else { 3979 /* 802.3z negotiation - only 1000base-X */ 3980 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; 3981 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; 3982 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | 3983 MVNETA_GMAC_FORCE_LINK_PASS | 3984 MVNETA_GMAC_CONFIG_MII_SPEED)) | 3985 MVNETA_GMAC_INBAND_AN_ENABLE | 3986 MVNETA_GMAC_CONFIG_GMII_SPEED | 3987 /* The MAC only supports FD mode */ 3988 MVNETA_GMAC_CONFIG_FULL_DUPLEX; 3989 3990 if (state->pause & MLO_PAUSE_AN && state->an_enabled) 3991 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; 3992 } 3993 3994 /* Armada 370 documentation says we can only change the port mode 3995 * and in-band enable when the link is down, so force it down 3996 * while making these changes. We also do this for GMAC_CTRL2 */ 3997 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || 3998 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || 3999 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { 4000 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 4001 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | 4002 MVNETA_GMAC_FORCE_LINK_DOWN); 4003 } 4004 4005 4006 /* When at 2.5G, the link partner can send frames with shortened 4007 * preambles. 
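*
* new_ctrl4 had MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE masked out when it
* was initialised above, so short preamble support is re-enabled here
* only for 2500BASE-X and is dropped again whenever the port is
* reconfigured for a slower interface mode.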
4008 */ 4009 if (state->interface == PHY_INTERFACE_MODE_2500BASEX) 4010 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; 4011 4012 if (pp->phy_interface != state->interface) { 4013 if (pp->comphy) 4014 WARN_ON(phy_power_off(pp->comphy)); 4015 WARN_ON(mvneta_config_interface(pp, state->interface)); 4016 } 4017 4018 if (new_ctrl0 != gmac_ctrl0) 4019 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); 4020 if (new_ctrl2 != gmac_ctrl2) 4021 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); 4022 if (new_ctrl4 != gmac_ctrl4) 4023 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); 4024 if (new_clk != gmac_clk) 4025 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); 4026 if (new_an != gmac_an) 4027 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); 4028 4029 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { 4030 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 4031 MVNETA_GMAC2_PORT_RESET) != 0) 4032 continue; 4033 } 4034 } 4035 4036 static void mvneta_set_eee(struct mvneta_port *pp, bool enable) 4037 { 4038 u32 lpi_ctl1; 4039 4040 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); 4041 if (enable) 4042 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; 4043 else 4044 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; 4045 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); 4046 } 4047 4048 static void mvneta_mac_link_down(struct phylink_config *config, 4049 unsigned int mode, phy_interface_t interface) 4050 { 4051 struct net_device *ndev = to_net_dev(config->dev); 4052 struct mvneta_port *pp = netdev_priv(ndev); 4053 u32 val; 4054 4055 mvneta_port_down(pp); 4056 4057 if (!phylink_autoneg_inband(mode)) { 4058 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4059 val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 4060 val |= MVNETA_GMAC_FORCE_LINK_DOWN; 4061 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4062 } 4063 4064 pp->eee_active = false; 4065 mvneta_set_eee(pp, false); 4066 } 4067 4068 static void mvneta_mac_link_up(struct phylink_config *config, 4069 struct phy_device *phy, 4070 unsigned int mode, phy_interface_t interface, 4071 int speed, int duplex, 4072 bool tx_pause, bool rx_pause) 4073 { 4074 struct net_device *ndev = to_net_dev(config->dev); 4075 struct mvneta_port *pp = netdev_priv(ndev); 4076 u32 val; 4077 4078 if (!phylink_autoneg_inband(mode)) { 4079 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4080 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | 4081 MVNETA_GMAC_CONFIG_MII_SPEED | 4082 MVNETA_GMAC_CONFIG_GMII_SPEED | 4083 MVNETA_GMAC_CONFIG_FLOW_CTRL | 4084 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 4085 val |= MVNETA_GMAC_FORCE_LINK_PASS; 4086 4087 if (speed == SPEED_1000 || speed == SPEED_2500) 4088 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 4089 else if (speed == SPEED_100) 4090 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 4091 4092 if (duplex == DUPLEX_FULL) 4093 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 4094 4095 if (tx_pause || rx_pause) 4096 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4097 4098 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4099 } else { 4100 /* When inband doesn't cover flow control or flow control is 4101 * disabled, we need to manually configure it. This bit will 4102 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 
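*
* Note that the MAC has a single enable covering both pause
* directions, so (as in the forced-link branch above) rx_pause and
* tx_pause are simply OR-ed together below; asymmetric pause cannot be
* programmed here, which matches mvneta_validate() advertising only
* symmetric Pause.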
4103 */ 4104 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 4105 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; 4106 4107 if (tx_pause || rx_pause) 4108 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; 4109 4110 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 4111 } 4112 4113 mvneta_port_up(pp); 4114 4115 if (phy && pp->eee_enabled) { 4116 pp->eee_active = phy_init_eee(phy, 0) >= 0; 4117 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); 4118 } 4119 } 4120 4121 static const struct phylink_mac_ops mvneta_phylink_ops = { 4122 .validate = mvneta_validate, 4123 .mac_pcs_get_state = mvneta_mac_pcs_get_state, 4124 .mac_an_restart = mvneta_mac_an_restart, 4125 .mac_config = mvneta_mac_config, 4126 .mac_link_down = mvneta_mac_link_down, 4127 .mac_link_up = mvneta_mac_link_up, 4128 }; 4129 4130 static int mvneta_mdio_probe(struct mvneta_port *pp) 4131 { 4132 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 4133 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); 4134 4135 if (err) 4136 netdev_err(pp->dev, "could not attach PHY: %d\n", err); 4137 4138 phylink_ethtool_get_wol(pp->phylink, &wol); 4139 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); 4140 4141 /* PHY WoL may be enabled but device wakeup disabled */ 4142 if (wol.supported) 4143 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); 4144 4145 return err; 4146 } 4147 4148 static void mvneta_mdio_remove(struct mvneta_port *pp) 4149 { 4150 phylink_disconnect_phy(pp->phylink); 4151 } 4152 4153 /* Electing a CPU must be done in an atomic way: it should be done 4154 * after or before the removal/insertion of a CPU and this function is 4155 * not reentrant. 4156 */ 4157 static void mvneta_percpu_elect(struct mvneta_port *pp) 4158 { 4159 int elected_cpu = 0, max_cpu, cpu, i = 0; 4160 4161 /* Use the cpu associated to the rxq when it is online, in all 4162 * the other cases, use the cpu 0 which can't be offline. 4163 */ 4164 if (cpu_online(pp->rxq_def)) 4165 elected_cpu = pp->rxq_def; 4166 4167 max_cpu = num_present_cpus(); 4168 4169 for_each_online_cpu(cpu) { 4170 int rxq_map = 0, txq_map = 0; 4171 int rxq; 4172 4173 for (rxq = 0; rxq < rxq_number; rxq++) 4174 if ((rxq % max_cpu) == cpu) 4175 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 4176 4177 if (cpu == elected_cpu) 4178 /* Map the default receive queue queue to the 4179 * elected CPU 4180 */ 4181 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); 4182 4183 /* We update the TX queue map only if we have one 4184 * queue. In this case we associate the TX queue to 4185 * the CPU bound to the default RX queue 4186 */ 4187 if (txq_number == 1) 4188 txq_map = (cpu == elected_cpu) ? 4189 MVNETA_CPU_TXQ_ACCESS(1) : 0; 4190 else 4191 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 4192 MVNETA_CPU_TXQ_ACCESS_ALL_MASK; 4193 4194 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); 4195 4196 /* Update the interrupt mask on each CPU according the 4197 * new mapping 4198 */ 4199 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 4200 pp, true); 4201 i++; 4202 4203 } 4204 }; 4205 4206 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) 4207 { 4208 int other_cpu; 4209 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4210 node_online); 4211 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4212 4213 4214 spin_lock(&pp->lock); 4215 /* 4216 * Configuring the driver for a new CPU while the driver is 4217 * stopping is racy, so just avoid it. 
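*
* The counterpart is mvneta_stop(), which takes pp->lock, sets
* pp->is_stopped and only then tears the port down, so once we hold
* the lock here a false is_stopped guarantees the port is not being
* stopped under our feet.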
4218 */ 4219 if (pp->is_stopped) { 4220 spin_unlock(&pp->lock); 4221 return 0; 4222 } 4223 netif_tx_stop_all_queues(pp->dev); 4224 4225 /* 4226 * We have to synchronise on tha napi of each CPU except the one 4227 * just being woken up 4228 */ 4229 for_each_online_cpu(other_cpu) { 4230 if (other_cpu != cpu) { 4231 struct mvneta_pcpu_port *other_port = 4232 per_cpu_ptr(pp->ports, other_cpu); 4233 4234 napi_synchronize(&other_port->napi); 4235 } 4236 } 4237 4238 /* Mask all ethernet port interrupts */ 4239 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4240 napi_enable(&port->napi); 4241 4242 /* 4243 * Enable per-CPU interrupts on the CPU that is 4244 * brought up. 4245 */ 4246 mvneta_percpu_enable(pp); 4247 4248 /* 4249 * Enable per-CPU interrupt on the one CPU we care 4250 * about. 4251 */ 4252 mvneta_percpu_elect(pp); 4253 4254 /* Unmask all ethernet port interrupts */ 4255 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4256 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4257 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4258 MVNETA_CAUSE_LINK_CHANGE); 4259 netif_tx_start_all_queues(pp->dev); 4260 spin_unlock(&pp->lock); 4261 return 0; 4262 } 4263 4264 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) 4265 { 4266 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4267 node_online); 4268 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 4269 4270 /* 4271 * Thanks to this lock we are sure that any pending cpu election is 4272 * done. 4273 */ 4274 spin_lock(&pp->lock); 4275 /* Mask all ethernet port interrupts */ 4276 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4277 spin_unlock(&pp->lock); 4278 4279 napi_synchronize(&port->napi); 4280 napi_disable(&port->napi); 4281 /* Disable per-CPU interrupts on the CPU that is brought down. */ 4282 mvneta_percpu_disable(pp); 4283 return 0; 4284 } 4285 4286 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) 4287 { 4288 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, 4289 node_dead); 4290 4291 /* Check if a new CPU must be elected now this on is down */ 4292 spin_lock(&pp->lock); 4293 mvneta_percpu_elect(pp); 4294 spin_unlock(&pp->lock); 4295 /* Unmask all ethernet port interrupts */ 4296 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 4297 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 4298 MVNETA_CAUSE_PHY_STATUS_CHANGE | 4299 MVNETA_CAUSE_LINK_CHANGE); 4300 netif_tx_start_all_queues(pp->dev); 4301 return 0; 4302 } 4303 4304 static int mvneta_open(struct net_device *dev) 4305 { 4306 struct mvneta_port *pp = netdev_priv(dev); 4307 int ret; 4308 4309 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 4310 4311 ret = mvneta_setup_rxqs(pp); 4312 if (ret) 4313 return ret; 4314 4315 ret = mvneta_setup_txqs(pp); 4316 if (ret) 4317 goto err_cleanup_rxqs; 4318 4319 /* Connect to port interrupt line */ 4320 if (pp->neta_armada3700) 4321 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 4322 dev->name, pp); 4323 else 4324 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr, 4325 dev->name, pp->ports); 4326 if (ret) { 4327 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 4328 goto err_cleanup_txqs; 4329 } 4330 4331 if (!pp->neta_armada3700) { 4332 /* Enable per-CPU interrupt on all the CPU to handle our RX 4333 * queue interrupts 4334 */ 4335 on_each_cpu(mvneta_percpu_enable, pp, true); 4336 4337 pp->is_stopped = false; 4338 /* Register a CPU notifier to handle the case where our CPU 4339 * might be taken offline. 
4340 */ 4341 ret = cpuhp_state_add_instance_nocalls(online_hpstate, 4342 &pp->node_online); 4343 if (ret) 4344 goto err_free_irq; 4345 4346 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4347 &pp->node_dead); 4348 if (ret) 4349 goto err_free_online_hp; 4350 } 4351 4352 ret = mvneta_mdio_probe(pp); 4353 if (ret < 0) { 4354 netdev_err(dev, "cannot probe MDIO bus\n"); 4355 goto err_free_dead_hp; 4356 } 4357 4358 mvneta_start_dev(pp); 4359 4360 return 0; 4361 4362 err_free_dead_hp: 4363 if (!pp->neta_armada3700) 4364 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4365 &pp->node_dead); 4366 err_free_online_hp: 4367 if (!pp->neta_armada3700) 4368 cpuhp_state_remove_instance_nocalls(online_hpstate, 4369 &pp->node_online); 4370 err_free_irq: 4371 if (pp->neta_armada3700) { 4372 free_irq(pp->dev->irq, pp); 4373 } else { 4374 on_each_cpu(mvneta_percpu_disable, pp, true); 4375 free_percpu_irq(pp->dev->irq, pp->ports); 4376 } 4377 err_cleanup_txqs: 4378 mvneta_cleanup_txqs(pp); 4379 err_cleanup_rxqs: 4380 mvneta_cleanup_rxqs(pp); 4381 return ret; 4382 } 4383 4384 /* Stop the port, free port interrupt line */ 4385 static int mvneta_stop(struct net_device *dev) 4386 { 4387 struct mvneta_port *pp = netdev_priv(dev); 4388 4389 if (!pp->neta_armada3700) { 4390 /* Inform that we are stopping so we don't want to setup the 4391 * driver for new CPUs in the notifiers. The code of the 4392 * notifier for CPU online is protected by the same spinlock, 4393 * so when we get the lock, the notifer work is done. 4394 */ 4395 spin_lock(&pp->lock); 4396 pp->is_stopped = true; 4397 spin_unlock(&pp->lock); 4398 4399 mvneta_stop_dev(pp); 4400 mvneta_mdio_remove(pp); 4401 4402 cpuhp_state_remove_instance_nocalls(online_hpstate, 4403 &pp->node_online); 4404 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 4405 &pp->node_dead); 4406 on_each_cpu(mvneta_percpu_disable, pp, true); 4407 free_percpu_irq(dev->irq, pp->ports); 4408 } else { 4409 mvneta_stop_dev(pp); 4410 mvneta_mdio_remove(pp); 4411 free_irq(dev->irq, pp); 4412 } 4413 4414 mvneta_cleanup_rxqs(pp); 4415 mvneta_cleanup_txqs(pp); 4416 4417 return 0; 4418 } 4419 4420 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4421 { 4422 struct mvneta_port *pp = netdev_priv(dev); 4423 4424 return phylink_mii_ioctl(pp->phylink, ifr, cmd); 4425 } 4426 4427 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, 4428 struct netlink_ext_ack *extack) 4429 { 4430 bool need_update, running = netif_running(dev); 4431 struct mvneta_port *pp = netdev_priv(dev); 4432 struct bpf_prog *old_prog; 4433 4434 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4435 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); 4436 return -EOPNOTSUPP; 4437 } 4438 4439 if (pp->bm_priv) { 4440 NL_SET_ERR_MSG_MOD(extack, 4441 "Hardware Buffer Management not supported on XDP"); 4442 return -EOPNOTSUPP; 4443 } 4444 4445 need_update = !!pp->xdp_prog != !!prog; 4446 if (running && need_update) 4447 mvneta_stop(dev); 4448 4449 old_prog = xchg(&pp->xdp_prog, prog); 4450 if (old_prog) 4451 bpf_prog_put(old_prog); 4452 4453 if (running && need_update) 4454 return mvneta_open(dev); 4455 4456 return 0; 4457 } 4458 4459 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) 4460 { 4461 switch (xdp->command) { 4462 case XDP_SETUP_PROG: 4463 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); 4464 default: 4465 return -EINVAL; 4466 } 4467 } 4468 4469 /* Ethtool methods */ 4470 4471 /* Set link ksettings (phy 
address, speed) for ethtools */ 4472 static int 4473 mvneta_ethtool_set_link_ksettings(struct net_device *ndev, 4474 const struct ethtool_link_ksettings *cmd) 4475 { 4476 struct mvneta_port *pp = netdev_priv(ndev); 4477 4478 return phylink_ethtool_ksettings_set(pp->phylink, cmd); 4479 } 4480 4481 /* Get link ksettings for ethtools */ 4482 static int 4483 mvneta_ethtool_get_link_ksettings(struct net_device *ndev, 4484 struct ethtool_link_ksettings *cmd) 4485 { 4486 struct mvneta_port *pp = netdev_priv(ndev); 4487 4488 return phylink_ethtool_ksettings_get(pp->phylink, cmd); 4489 } 4490 4491 static int mvneta_ethtool_nway_reset(struct net_device *dev) 4492 { 4493 struct mvneta_port *pp = netdev_priv(dev); 4494 4495 return phylink_ethtool_nway_reset(pp->phylink); 4496 } 4497 4498 /* Set interrupt coalescing for ethtools */ 4499 static int mvneta_ethtool_set_coalesce(struct net_device *dev, 4500 struct ethtool_coalesce *c) 4501 { 4502 struct mvneta_port *pp = netdev_priv(dev); 4503 int queue; 4504 4505 for (queue = 0; queue < rxq_number; queue++) { 4506 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4507 rxq->time_coal = c->rx_coalesce_usecs; 4508 rxq->pkts_coal = c->rx_max_coalesced_frames; 4509 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 4510 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 4511 } 4512 4513 for (queue = 0; queue < txq_number; queue++) { 4514 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 4515 txq->done_pkts_coal = c->tx_max_coalesced_frames; 4516 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 4517 } 4518 4519 return 0; 4520 } 4521 4522 /* get coalescing for ethtools */ 4523 static int mvneta_ethtool_get_coalesce(struct net_device *dev, 4524 struct ethtool_coalesce *c) 4525 { 4526 struct mvneta_port *pp = netdev_priv(dev); 4527 4528 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 4529 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 4530 4531 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 4532 return 0; 4533 } 4534 4535 4536 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 4537 struct ethtool_drvinfo *drvinfo) 4538 { 4539 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 4540 sizeof(drvinfo->driver)); 4541 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 4542 sizeof(drvinfo->version)); 4543 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 4544 sizeof(drvinfo->bus_info)); 4545 } 4546 4547 4548 static void mvneta_ethtool_get_ringparam(struct net_device *netdev, 4549 struct ethtool_ringparam *ring) 4550 { 4551 struct mvneta_port *pp = netdev_priv(netdev); 4552 4553 ring->rx_max_pending = MVNETA_MAX_RXD; 4554 ring->tx_max_pending = MVNETA_MAX_TXD; 4555 ring->rx_pending = pp->rx_ring_size; 4556 ring->tx_pending = pp->tx_ring_size; 4557 } 4558 4559 static int mvneta_ethtool_set_ringparam(struct net_device *dev, 4560 struct ethtool_ringparam *ring) 4561 { 4562 struct mvneta_port *pp = netdev_priv(dev); 4563 4564 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 4565 return -EINVAL; 4566 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
4567 ring->rx_pending : MVNETA_MAX_RXD; 4568 4569 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 4570 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 4571 if (pp->tx_ring_size != ring->tx_pending) 4572 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 4573 pp->tx_ring_size, ring->tx_pending); 4574 4575 if (netif_running(dev)) { 4576 mvneta_stop(dev); 4577 if (mvneta_open(dev)) { 4578 netdev_err(dev, 4579 "error on opening device after ring param change\n"); 4580 return -ENOMEM; 4581 } 4582 } 4583 4584 return 0; 4585 } 4586 4587 static void mvneta_ethtool_get_pauseparam(struct net_device *dev, 4588 struct ethtool_pauseparam *pause) 4589 { 4590 struct mvneta_port *pp = netdev_priv(dev); 4591 4592 phylink_ethtool_get_pauseparam(pp->phylink, pause); 4593 } 4594 4595 static int mvneta_ethtool_set_pauseparam(struct net_device *dev, 4596 struct ethtool_pauseparam *pause) 4597 { 4598 struct mvneta_port *pp = netdev_priv(dev); 4599 4600 return phylink_ethtool_set_pauseparam(pp->phylink, pause); 4601 } 4602 4603 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, 4604 u8 *data) 4605 { 4606 if (sset == ETH_SS_STATS) { 4607 int i; 4608 4609 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4610 memcpy(data + i * ETH_GSTRING_LEN, 4611 mvneta_statistics[i].name, ETH_GSTRING_LEN); 4612 } 4613 } 4614 4615 static void 4616 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, 4617 struct mvneta_ethtool_stats *es) 4618 { 4619 unsigned int start; 4620 int cpu; 4621 4622 for_each_possible_cpu(cpu) { 4623 struct mvneta_pcpu_stats *stats; 4624 u64 skb_alloc_error; 4625 u64 refill_error; 4626 u64 xdp_redirect; 4627 u64 xdp_xmit_err; 4628 u64 xdp_tx_err; 4629 u64 xdp_pass; 4630 u64 xdp_drop; 4631 u64 xdp_xmit; 4632 u64 xdp_tx; 4633 4634 stats = per_cpu_ptr(pp->stats, cpu); 4635 do { 4636 start = u64_stats_fetch_begin_irq(&stats->syncp); 4637 skb_alloc_error = stats->es.skb_alloc_error; 4638 refill_error = stats->es.refill_error; 4639 xdp_redirect = stats->es.ps.xdp_redirect; 4640 xdp_pass = stats->es.ps.xdp_pass; 4641 xdp_drop = stats->es.ps.xdp_drop; 4642 xdp_xmit = stats->es.ps.xdp_xmit; 4643 xdp_xmit_err = stats->es.ps.xdp_xmit_err; 4644 xdp_tx = stats->es.ps.xdp_tx; 4645 xdp_tx_err = stats->es.ps.xdp_tx_err; 4646 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 4647 4648 es->skb_alloc_error += skb_alloc_error; 4649 es->refill_error += refill_error; 4650 es->ps.xdp_redirect += xdp_redirect; 4651 es->ps.xdp_pass += xdp_pass; 4652 es->ps.xdp_drop += xdp_drop; 4653 es->ps.xdp_xmit += xdp_xmit; 4654 es->ps.xdp_xmit_err += xdp_xmit_err; 4655 es->ps.xdp_tx += xdp_tx; 4656 es->ps.xdp_tx_err += xdp_tx_err; 4657 } 4658 } 4659 4660 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) 4661 { 4662 struct mvneta_ethtool_stats stats = {}; 4663 const struct mvneta_statistic *s; 4664 void __iomem *base = pp->base; 4665 u32 high, low; 4666 u64 val; 4667 int i; 4668 4669 mvneta_ethtool_update_pcpu_stats(pp, &stats); 4670 for (i = 0, s = mvneta_statistics; 4671 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); 4672 s++, i++) { 4673 switch (s->type) { 4674 case T_REG_32: 4675 val = readl_relaxed(base + s->offset); 4676 pp->ethtool_stats[i] += val; 4677 break; 4678 case T_REG_64: 4679 /* Docs say to read low 32-bit then high */ 4680 low = readl_relaxed(base + s->offset); 4681 high = readl_relaxed(base + s->offset + 4); 4682 val = (u64)high << 32 | low; 4683 pp->ethtool_stats[i] += val; 4684 break; 4685 case T_SW: 4686 switch (s->offset) { 4687 case 
ETHTOOL_STAT_EEE_WAKEUP: 4688 val = phylink_get_eee_err(pp->phylink); 4689 pp->ethtool_stats[i] += val; 4690 break; 4691 case ETHTOOL_STAT_SKB_ALLOC_ERR: 4692 pp->ethtool_stats[i] = stats.skb_alloc_error; 4693 break; 4694 case ETHTOOL_STAT_REFILL_ERR: 4695 pp->ethtool_stats[i] = stats.refill_error; 4696 break; 4697 case ETHTOOL_XDP_REDIRECT: 4698 pp->ethtool_stats[i] = stats.ps.xdp_redirect; 4699 break; 4700 case ETHTOOL_XDP_PASS: 4701 pp->ethtool_stats[i] = stats.ps.xdp_pass; 4702 break; 4703 case ETHTOOL_XDP_DROP: 4704 pp->ethtool_stats[i] = stats.ps.xdp_drop; 4705 break; 4706 case ETHTOOL_XDP_TX: 4707 pp->ethtool_stats[i] = stats.ps.xdp_tx; 4708 break; 4709 case ETHTOOL_XDP_TX_ERR: 4710 pp->ethtool_stats[i] = stats.ps.xdp_tx_err; 4711 break; 4712 case ETHTOOL_XDP_XMIT: 4713 pp->ethtool_stats[i] = stats.ps.xdp_xmit; 4714 break; 4715 case ETHTOOL_XDP_XMIT_ERR: 4716 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; 4717 break; 4718 } 4719 break; 4720 } 4721 } 4722 } 4723 4724 static void mvneta_ethtool_get_stats(struct net_device *dev, 4725 struct ethtool_stats *stats, u64 *data) 4726 { 4727 struct mvneta_port *pp = netdev_priv(dev); 4728 int i; 4729 4730 mvneta_ethtool_update_stats(pp); 4731 4732 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) 4733 *data++ = pp->ethtool_stats[i]; 4734 } 4735 4736 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) 4737 { 4738 if (sset == ETH_SS_STATS) 4739 return ARRAY_SIZE(mvneta_statistics); 4740 return -EOPNOTSUPP; 4741 } 4742 4743 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) 4744 { 4745 return MVNETA_RSS_LU_TABLE_SIZE; 4746 } 4747 4748 static int mvneta_ethtool_get_rxnfc(struct net_device *dev, 4749 struct ethtool_rxnfc *info, 4750 u32 *rules __always_unused) 4751 { 4752 switch (info->cmd) { 4753 case ETHTOOL_GRXRINGS: 4754 info->data = rxq_number; 4755 return 0; 4756 case ETHTOOL_GRXFH: 4757 return -EOPNOTSUPP; 4758 default: 4759 return -EOPNOTSUPP; 4760 } 4761 } 4762 4763 static int mvneta_config_rss(struct mvneta_port *pp) 4764 { 4765 int cpu; 4766 u32 val; 4767 4768 netif_tx_stop_all_queues(pp->dev); 4769 4770 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 4771 4772 if (!pp->neta_armada3700) { 4773 /* We have to synchronise on the napi of each CPU */ 4774 for_each_online_cpu(cpu) { 4775 struct mvneta_pcpu_port *pcpu_port = 4776 per_cpu_ptr(pp->ports, cpu); 4777 4778 napi_synchronize(&pcpu_port->napi); 4779 napi_disable(&pcpu_port->napi); 4780 } 4781 } else { 4782 napi_synchronize(&pp->napi); 4783 napi_disable(&pp->napi); 4784 } 4785 4786 pp->rxq_def = pp->indir[0]; 4787 4788 /* Update unicast mapping */ 4789 mvneta_set_rx_mode(pp->dev); 4790 4791 /* Update val of portCfg register accordingly with all RxQueue types */ 4792 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); 4793 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 4794 4795 /* Update the elected CPU matching the new rxq_def */ 4796 spin_lock(&pp->lock); 4797 mvneta_percpu_elect(pp); 4798 spin_unlock(&pp->lock); 4799 4800 if (!pp->neta_armada3700) { 4801 /* We have to synchronise on the napi of each CPU */ 4802 for_each_online_cpu(cpu) { 4803 struct mvneta_pcpu_port *pcpu_port = 4804 per_cpu_ptr(pp->ports, cpu); 4805 4806 napi_enable(&pcpu_port->napi); 4807 } 4808 } else { 4809 napi_enable(&pp->napi); 4810 } 4811 4812 netif_tx_start_all_queues(pp->dev); 4813 4814 return 0; 4815 } 4816 4817 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 4818 const u8 *key, const u8 hfunc) 4819 { 4820 struct mvneta_port *pp = 
netdev_priv(dev); 4821 4822 /* Current code for Armada 3700 doesn't support RSS features yet */ 4823 if (pp->neta_armada3700) 4824 return -EOPNOTSUPP; 4825 4826 /* We require at least one supported parameter to be changed 4827 * and no change in any of the unsupported parameters 4828 */ 4829 if (key || 4830 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 4831 return -EOPNOTSUPP; 4832 4833 if (!indir) 4834 return 0; 4835 4836 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); 4837 4838 return mvneta_config_rss(pp); 4839 } 4840 4841 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 4842 u8 *hfunc) 4843 { 4844 struct mvneta_port *pp = netdev_priv(dev); 4845 4846 /* Current code for Armada 3700 doesn't support RSS features yet */ 4847 if (pp->neta_armada3700) 4848 return -EOPNOTSUPP; 4849 4850 if (hfunc) 4851 *hfunc = ETH_RSS_HASH_TOP; 4852 4853 if (!indir) 4854 return 0; 4855 4856 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); 4857 4858 return 0; 4859 } 4860 4861 static void mvneta_ethtool_get_wol(struct net_device *dev, 4862 struct ethtool_wolinfo *wol) 4863 { 4864 struct mvneta_port *pp = netdev_priv(dev); 4865 4866 phylink_ethtool_get_wol(pp->phylink, wol); 4867 } 4868 4869 static int mvneta_ethtool_set_wol(struct net_device *dev, 4870 struct ethtool_wolinfo *wol) 4871 { 4872 struct mvneta_port *pp = netdev_priv(dev); 4873 int ret; 4874 4875 ret = phylink_ethtool_set_wol(pp->phylink, wol); 4876 if (!ret) 4877 device_set_wakeup_enable(&dev->dev, !!wol->wolopts); 4878 4879 return ret; 4880 } 4881 4882 static int mvneta_ethtool_get_eee(struct net_device *dev, 4883 struct ethtool_eee *eee) 4884 { 4885 struct mvneta_port *pp = netdev_priv(dev); 4886 u32 lpi_ctl0; 4887 4888 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4889 4890 eee->eee_enabled = pp->eee_enabled; 4891 eee->eee_active = pp->eee_active; 4892 eee->tx_lpi_enabled = pp->tx_lpi_enabled; 4893 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; 4894 4895 return phylink_ethtool_get_eee(pp->phylink, eee); 4896 } 4897 4898 static int mvneta_ethtool_set_eee(struct net_device *dev, 4899 struct ethtool_eee *eee) 4900 { 4901 struct mvneta_port *pp = netdev_priv(dev); 4902 u32 lpi_ctl0; 4903 4904 /* The Armada 37x documents do not give limits for this other than 4905 * it being an 8-bit register. 
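*
* The timer lands in bits 15:8 of LPI_CTRL_0; as an illustration, a
* requested tx_lpi_timer of 10 is written below as (10 << 8) == 0xa00,
* while anything above 255 does not fit in the 8-bit field and is
* rejected with -EINVAL when LPI is being enabled.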
*/ 4906 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) 4907 return -EINVAL; 4908 4909 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); 4910 lpi_ctl0 &= ~(0xff << 8); 4911 lpi_ctl0 |= eee->tx_lpi_timer << 8; 4912 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); 4913 4914 pp->eee_enabled = eee->eee_enabled; 4915 pp->tx_lpi_enabled = eee->tx_lpi_enabled; 4916 4917 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); 4918 4919 return phylink_ethtool_set_eee(pp->phylink, eee); 4920 } 4921 4922 static const struct net_device_ops mvneta_netdev_ops = { 4923 .ndo_open = mvneta_open, 4924 .ndo_stop = mvneta_stop, 4925 .ndo_start_xmit = mvneta_tx, 4926 .ndo_set_rx_mode = mvneta_set_rx_mode, 4927 .ndo_set_mac_address = mvneta_set_mac_addr, 4928 .ndo_change_mtu = mvneta_change_mtu, 4929 .ndo_fix_features = mvneta_fix_features, 4930 .ndo_get_stats64 = mvneta_get_stats64, 4931 .ndo_do_ioctl = mvneta_ioctl, 4932 .ndo_bpf = mvneta_xdp, 4933 .ndo_xdp_xmit = mvneta_xdp_xmit, 4934 }; 4935 4936 static const struct ethtool_ops mvneta_eth_tool_ops = { 4937 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | 4938 ETHTOOL_COALESCE_MAX_FRAMES, 4939 .nway_reset = mvneta_ethtool_nway_reset, 4940 .get_link = ethtool_op_get_link, 4941 .set_coalesce = mvneta_ethtool_set_coalesce, 4942 .get_coalesce = mvneta_ethtool_get_coalesce, 4943 .get_drvinfo = mvneta_ethtool_get_drvinfo, 4944 .get_ringparam = mvneta_ethtool_get_ringparam, 4945 .set_ringparam = mvneta_ethtool_set_ringparam, 4946 .get_pauseparam = mvneta_ethtool_get_pauseparam, 4947 .set_pauseparam = mvneta_ethtool_set_pauseparam, 4948 .get_strings = mvneta_ethtool_get_strings, 4949 .get_ethtool_stats = mvneta_ethtool_get_stats, 4950 .get_sset_count = mvneta_ethtool_get_sset_count, 4951 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, 4952 .get_rxnfc = mvneta_ethtool_get_rxnfc, 4953 .get_rxfh = mvneta_ethtool_get_rxfh, 4954 .set_rxfh = mvneta_ethtool_set_rxfh, 4955 .get_link_ksettings = mvneta_ethtool_get_link_ksettings, 4956 .set_link_ksettings = mvneta_ethtool_set_link_ksettings, 4957 .get_wol = mvneta_ethtool_get_wol, 4958 .set_wol = mvneta_ethtool_set_wol, 4959 .get_eee = mvneta_ethtool_get_eee, 4960 .set_eee = mvneta_ethtool_set_eee, 4961 }; 4962 4963 /* Initialize hw */ 4964 static int mvneta_init(struct device *dev, struct mvneta_port *pp) 4965 { 4966 int queue; 4967 4968 /* Disable port */ 4969 mvneta_port_disable(pp); 4970 4971 /* Set port default values */ 4972 mvneta_defaults_set(pp); 4973 4974 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); 4975 if (!pp->txqs) 4976 return -ENOMEM; 4977 4978 /* Initialize TX descriptor rings */ 4979 for (queue = 0; queue < txq_number; queue++) { 4980 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 4981 txq->id = queue; 4982 txq->size = pp->tx_ring_size; 4983 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 4984 } 4985 4986 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); 4987 if (!pp->rxqs) 4988 return -ENOMEM; 4989 4990 /* Create Rx descriptor rings */ 4991 for (queue = 0; queue < rxq_number; queue++) { 4992 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 4993 rxq->id = queue; 4994 rxq->size = pp->rx_ring_size; 4995 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 4996 rxq->time_coal = MVNETA_RX_COAL_USEC; 4997 rxq->buf_virt_addr 4998 = devm_kmalloc_array(pp->dev->dev.parent, 4999 rxq->size, 5000 sizeof(*rxq->buf_virt_addr), 5001 GFP_KERNEL); 5002 if (!rxq->buf_virt_addr) 5003 return -ENOMEM; 5004 } 5005 5006 return 0; 5007 } 5008 5009 /* platform glue : initialize 
decoding windows */ 5010 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 5011 const struct mbus_dram_target_info *dram) 5012 { 5013 u32 win_enable; 5014 u32 win_protect; 5015 int i; 5016 5017 for (i = 0; i < 6; i++) { 5018 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 5019 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 5020 5021 if (i < 4) 5022 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 5023 } 5024 5025 win_enable = 0x3f; 5026 win_protect = 0; 5027 5028 if (dram) { 5029 for (i = 0; i < dram->num_cs; i++) { 5030 const struct mbus_dram_window *cs = dram->cs + i; 5031 5032 mvreg_write(pp, MVNETA_WIN_BASE(i), 5033 (cs->base & 0xffff0000) | 5034 (cs->mbus_attr << 8) | 5035 dram->mbus_dram_target_id); 5036 5037 mvreg_write(pp, MVNETA_WIN_SIZE(i), 5038 (cs->size - 1) & 0xffff0000); 5039 5040 win_enable &= ~(1 << i); 5041 win_protect |= 3 << (2 * i); 5042 } 5043 } else { 5044 /* For Armada3700 open default 4GB Mbus window, leaving 5045 * arbitration of target/attribute to a different layer 5046 * of configuration. 5047 */ 5048 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); 5049 win_enable &= ~BIT(0); 5050 win_protect = 3; 5051 } 5052 5053 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 5054 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); 5055 } 5056 5057 /* Power up the port */ 5058 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 5059 { 5060 /* MAC Cause register should be cleared */ 5061 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 5062 5063 if (phy_mode != PHY_INTERFACE_MODE_QSGMII && 5064 phy_mode != PHY_INTERFACE_MODE_SGMII && 5065 !phy_interface_mode_is_8023z(phy_mode) && 5066 !phy_interface_mode_is_rgmii(phy_mode)) 5067 return -EINVAL; 5068 5069 return 0; 5070 } 5071 5072 /* Device initialization routine */ 5073 static int mvneta_probe(struct platform_device *pdev) 5074 { 5075 struct device_node *dn = pdev->dev.of_node; 5076 struct device_node *bm_node; 5077 struct mvneta_port *pp; 5078 struct net_device *dev; 5079 struct phylink *phylink; 5080 struct phy *comphy; 5081 const char *dt_mac_addr; 5082 char hw_mac_addr[ETH_ALEN]; 5083 phy_interface_t phy_mode; 5084 const char *mac_from; 5085 int tx_csum_limit; 5086 int err; 5087 int cpu; 5088 5089 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), 5090 txq_number, rxq_number); 5091 if (!dev) 5092 return -ENOMEM; 5093 5094 dev->irq = irq_of_parse_and_map(dn, 0); 5095 if (dev->irq == 0) 5096 return -EINVAL; 5097 5098 err = of_get_phy_mode(dn, &phy_mode); 5099 if (err) { 5100 dev_err(&pdev->dev, "incorrect phy-mode\n"); 5101 goto err_free_irq; 5102 } 5103 5104 comphy = devm_of_phy_get(&pdev->dev, dn, NULL); 5105 if (comphy == ERR_PTR(-EPROBE_DEFER)) { 5106 err = -EPROBE_DEFER; 5107 goto err_free_irq; 5108 } else if (IS_ERR(comphy)) { 5109 comphy = NULL; 5110 } 5111 5112 pp = netdev_priv(dev); 5113 spin_lock_init(&pp->lock); 5114 5115 pp->phylink_config.dev = &dev->dev; 5116 pp->phylink_config.type = PHYLINK_NETDEV; 5117 5118 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, 5119 phy_mode, &mvneta_phylink_ops); 5120 if (IS_ERR(phylink)) { 5121 err = PTR_ERR(phylink); 5122 goto err_free_irq; 5123 } 5124 5125 dev->tx_queue_len = MVNETA_MAX_TXD; 5126 dev->watchdog_timeo = 5 * HZ; 5127 dev->netdev_ops = &mvneta_netdev_ops; 5128 5129 dev->ethtool_ops = &mvneta_eth_tool_ops; 5130 5131 pp->phylink = phylink; 5132 pp->comphy = comphy; 5133 pp->phy_interface = phy_mode; 5134 pp->dn = dn; 5135 5136 pp->rxq_def = rxq_def; 5137 pp->indir[0] = rxq_def; 5138 5139 /* Get special SoC 
configurations */ 5140 if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) 5141 pp->neta_armada3700 = true; 5142 5143 pp->clk = devm_clk_get(&pdev->dev, "core"); 5144 if (IS_ERR(pp->clk)) 5145 pp->clk = devm_clk_get(&pdev->dev, NULL); 5146 if (IS_ERR(pp->clk)) { 5147 err = PTR_ERR(pp->clk); 5148 goto err_free_phylink; 5149 } 5150 5151 clk_prepare_enable(pp->clk); 5152 5153 pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); 5154 if (!IS_ERR(pp->clk_bus)) 5155 clk_prepare_enable(pp->clk_bus); 5156 5157 pp->base = devm_platform_ioremap_resource(pdev, 0); 5158 if (IS_ERR(pp->base)) { 5159 err = PTR_ERR(pp->base); 5160 goto err_clk; 5161 } 5162 5163 /* Alloc per-cpu port structure */ 5164 pp->ports = alloc_percpu(struct mvneta_pcpu_port); 5165 if (!pp->ports) { 5166 err = -ENOMEM; 5167 goto err_clk; 5168 } 5169 5170 /* Alloc per-cpu stats */ 5171 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); 5172 if (!pp->stats) { 5173 err = -ENOMEM; 5174 goto err_free_ports; 5175 } 5176 5177 dt_mac_addr = of_get_mac_address(dn); 5178 if (!IS_ERR(dt_mac_addr)) { 5179 mac_from = "device tree"; 5180 ether_addr_copy(dev->dev_addr, dt_mac_addr); 5181 } else { 5182 mvneta_get_mac_addr(pp, hw_mac_addr); 5183 if (is_valid_ether_addr(hw_mac_addr)) { 5184 mac_from = "hardware"; 5185 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); 5186 } else { 5187 mac_from = "random"; 5188 eth_hw_addr_random(dev); 5189 } 5190 } 5191 5192 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { 5193 if (tx_csum_limit < 0 || 5194 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { 5195 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5196 dev_info(&pdev->dev, 5197 "Wrong TX csum limit in DT, set to %dB\n", 5198 MVNETA_TX_CSUM_DEF_SIZE); 5199 } 5200 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { 5201 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; 5202 } else { 5203 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; 5204 } 5205 5206 pp->tx_csum_limit = tx_csum_limit; 5207 5208 pp->dram_target_info = mv_mbus_dram_info(); 5209 /* Armada3700 requires setting default configuration of Mbus 5210 * windows, however without using filled mbus_dram_target_info 5211 * structure. 5212 */ 5213 if (pp->dram_target_info || pp->neta_armada3700) 5214 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5215 5216 pp->tx_ring_size = MVNETA_MAX_TXD; 5217 pp->rx_ring_size = MVNETA_MAX_RXD; 5218 5219 pp->dev = dev; 5220 SET_NETDEV_DEV(dev, &pdev->dev); 5221 5222 pp->id = global_port_id++; 5223 5224 /* Obtain access to BM resources if enabled and already initialized */ 5225 bm_node = of_parse_phandle(dn, "buffer-manager", 0); 5226 if (bm_node) { 5227 pp->bm_priv = mvneta_bm_get(bm_node); 5228 if (pp->bm_priv) { 5229 err = mvneta_bm_port_init(pdev, pp); 5230 if (err < 0) { 5231 dev_info(&pdev->dev, 5232 "use SW buffer management\n"); 5233 mvneta_bm_put(pp->bm_priv); 5234 pp->bm_priv = NULL; 5235 } 5236 } 5237 /* Set RX packet offset correction for platforms, whose 5238 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit 5239 * platforms and 0B for 32-bit ones. 
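*
* As a worked example (assuming the usual 64B value of
* MVNETA_RX_PKT_OFFSET_CORRECTION): with NET_SKB_PAD == 64 the max()
* below evaluates to 0, whereas a configuration padding its skbs to
* 128B would get a 64B correction.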
5240 */ 5241 pp->rx_offset_correction = max(0, 5242 NET_SKB_PAD - 5243 MVNETA_RX_PKT_OFFSET_CORRECTION); 5244 } 5245 of_node_put(bm_node); 5246 5247 /* sw buffer management */ 5248 if (!pp->bm_priv) 5249 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5250 5251 err = mvneta_init(&pdev->dev, pp); 5252 if (err < 0) 5253 goto err_netdev; 5254 5255 err = mvneta_port_power_up(pp, pp->phy_interface); 5256 if (err < 0) { 5257 dev_err(&pdev->dev, "can't power up port\n"); 5258 return err; 5259 } 5260 5261 /* Armada3700 network controller does not support per-cpu 5262 * operation, so only single NAPI should be initialized. 5263 */ 5264 if (pp->neta_armada3700) { 5265 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); 5266 } else { 5267 for_each_present_cpu(cpu) { 5268 struct mvneta_pcpu_port *port = 5269 per_cpu_ptr(pp->ports, cpu); 5270 5271 netif_napi_add(dev, &port->napi, mvneta_poll, 5272 NAPI_POLL_WEIGHT); 5273 port->pp = pp; 5274 } 5275 } 5276 5277 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5278 NETIF_F_TSO | NETIF_F_RXCSUM; 5279 dev->hw_features |= dev->features; 5280 dev->vlan_features |= dev->features; 5281 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 5282 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 5283 5284 /* MTU range: 68 - 9676 */ 5285 dev->min_mtu = ETH_MIN_MTU; 5286 /* 9676 == 9700 - 20 and rounding to 8 */ 5287 dev->max_mtu = 9676; 5288 5289 err = register_netdev(dev); 5290 if (err < 0) { 5291 dev_err(&pdev->dev, "failed to register\n"); 5292 goto err_netdev; 5293 } 5294 5295 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 5296 dev->dev_addr); 5297 5298 platform_set_drvdata(pdev, pp->dev); 5299 5300 return 0; 5301 5302 err_netdev: 5303 if (pp->bm_priv) { 5304 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5305 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5306 1 << pp->id); 5307 mvneta_bm_put(pp->bm_priv); 5308 } 5309 free_percpu(pp->stats); 5310 err_free_ports: 5311 free_percpu(pp->ports); 5312 err_clk: 5313 clk_disable_unprepare(pp->clk_bus); 5314 clk_disable_unprepare(pp->clk); 5315 err_free_phylink: 5316 if (pp->phylink) 5317 phylink_destroy(pp->phylink); 5318 err_free_irq: 5319 irq_dispose_mapping(dev->irq); 5320 return err; 5321 } 5322 5323 /* Device removal routine */ 5324 static int mvneta_remove(struct platform_device *pdev) 5325 { 5326 struct net_device *dev = platform_get_drvdata(pdev); 5327 struct mvneta_port *pp = netdev_priv(dev); 5328 5329 unregister_netdev(dev); 5330 clk_disable_unprepare(pp->clk_bus); 5331 clk_disable_unprepare(pp->clk); 5332 free_percpu(pp->ports); 5333 free_percpu(pp->stats); 5334 irq_dispose_mapping(dev->irq); 5335 phylink_destroy(pp->phylink); 5336 5337 if (pp->bm_priv) { 5338 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); 5339 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 5340 1 << pp->id); 5341 mvneta_bm_put(pp->bm_priv); 5342 } 5343 5344 return 0; 5345 } 5346 5347 #ifdef CONFIG_PM_SLEEP 5348 static int mvneta_suspend(struct device *device) 5349 { 5350 int queue; 5351 struct net_device *dev = dev_get_drvdata(device); 5352 struct mvneta_port *pp = netdev_priv(dev); 5353 5354 if (!netif_running(dev)) 5355 goto clean_exit; 5356 5357 if (!pp->neta_armada3700) { 5358 spin_lock(&pp->lock); 5359 pp->is_stopped = true; 5360 spin_unlock(&pp->lock); 5361 5362 cpuhp_state_remove_instance_nocalls(online_hpstate, 5363 &pp->node_online); 5364 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5365 &pp->node_dead); 5366 } 5367 5368 rtnl_lock(); 5369 
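/* mvneta_stop_dev() ends up calling phylink_stop(), which must run
 * under the RTNL lock, hence the rtnl_lock()/rtnl_unlock() pair here;
 * mvneta_resume() takes it around mvneta_start_dev() for the same
 * reason.
 */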
mvneta_stop_dev(pp); 5370 rtnl_unlock(); 5371 5372 for (queue = 0; queue < rxq_number; queue++) { 5373 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5374 5375 mvneta_rxq_drop_pkts(pp, rxq); 5376 } 5377 5378 for (queue = 0; queue < txq_number; queue++) { 5379 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5380 5381 mvneta_txq_hw_deinit(pp, txq); 5382 } 5383 5384 clean_exit: 5385 netif_device_detach(dev); 5386 clk_disable_unprepare(pp->clk_bus); 5387 clk_disable_unprepare(pp->clk); 5388 5389 return 0; 5390 } 5391 5392 static int mvneta_resume(struct device *device) 5393 { 5394 struct platform_device *pdev = to_platform_device(device); 5395 struct net_device *dev = dev_get_drvdata(device); 5396 struct mvneta_port *pp = netdev_priv(dev); 5397 int err, queue; 5398 5399 clk_prepare_enable(pp->clk); 5400 if (!IS_ERR(pp->clk_bus)) 5401 clk_prepare_enable(pp->clk_bus); 5402 if (pp->dram_target_info || pp->neta_armada3700) 5403 mvneta_conf_mbus_windows(pp, pp->dram_target_info); 5404 if (pp->bm_priv) { 5405 err = mvneta_bm_port_init(pdev, pp); 5406 if (err < 0) { 5407 dev_info(&pdev->dev, "use SW buffer management\n"); 5408 pp->rx_offset_correction = MVNETA_SKB_HEADROOM; 5409 pp->bm_priv = NULL; 5410 } 5411 } 5412 mvneta_defaults_set(pp); 5413 err = mvneta_port_power_up(pp, pp->phy_interface); 5414 if (err < 0) { 5415 dev_err(device, "can't power up port\n"); 5416 return err; 5417 } 5418 5419 netif_device_attach(dev); 5420 5421 if (!netif_running(dev)) 5422 return 0; 5423 5424 for (queue = 0; queue < rxq_number; queue++) { 5425 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 5426 5427 rxq->next_desc_to_proc = 0; 5428 mvneta_rxq_hw_init(pp, rxq); 5429 } 5430 5431 for (queue = 0; queue < txq_number; queue++) { 5432 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 5433 5434 txq->next_desc_to_proc = 0; 5435 mvneta_txq_hw_init(pp, txq); 5436 } 5437 5438 if (!pp->neta_armada3700) { 5439 spin_lock(&pp->lock); 5440 pp->is_stopped = false; 5441 spin_unlock(&pp->lock); 5442 cpuhp_state_add_instance_nocalls(online_hpstate, 5443 &pp->node_online); 5444 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, 5445 &pp->node_dead); 5446 } 5447 5448 rtnl_lock(); 5449 mvneta_start_dev(pp); 5450 rtnl_unlock(); 5451 mvneta_set_rx_mode(dev); 5452 5453 return 0; 5454 } 5455 #endif 5456 5457 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); 5458 5459 static const struct of_device_id mvneta_match[] = { 5460 { .compatible = "marvell,armada-370-neta" }, 5461 { .compatible = "marvell,armada-xp-neta" }, 5462 { .compatible = "marvell,armada-3700-neta" }, 5463 { } 5464 }; 5465 MODULE_DEVICE_TABLE(of, mvneta_match); 5466 5467 static struct platform_driver mvneta_driver = { 5468 .probe = mvneta_probe, 5469 .remove = mvneta_remove, 5470 .driver = { 5471 .name = MVNETA_DRIVER_NAME, 5472 .of_match_table = mvneta_match, 5473 .pm = &mvneta_pm_ops, 5474 }, 5475 }; 5476 5477 static int __init mvneta_driver_init(void) 5478 { 5479 int ret; 5480 5481 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online", 5482 mvneta_cpu_online, 5483 mvneta_cpu_down_prepare); 5484 if (ret < 0) 5485 goto out; 5486 online_hpstate = ret; 5487 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", 5488 NULL, mvneta_cpu_dead); 5489 if (ret) 5490 goto err_dead; 5491 5492 ret = platform_driver_register(&mvneta_driver); 5493 if (ret) 5494 goto err; 5495 return 0; 5496 5497 err: 5498 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5499 err_dead: 5500 cpuhp_remove_multi_state(online_hpstate); 5501 out: 5502 
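/* The error labels above unwind in reverse order of registration: a
 * platform_driver_register() failure removes both hotplug states, a
 * CPUHP_NET_MVNETA_DEAD setup failure removes only the online state,
 * and ret carries the first error back to the module loader.
 */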
return ret; 5503 } 5504 module_init(mvneta_driver_init); 5505 5506 static void __exit mvneta_driver_exit(void) 5507 { 5508 platform_driver_unregister(&mvneta_driver); 5509 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); 5510 cpuhp_remove_multi_state(online_hpstate); 5511 } 5512 module_exit(mvneta_driver_exit); 5513 5514 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 5515 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 5516 MODULE_LICENSE("GPL"); 5517 5518 module_param(rxq_number, int, 0444); 5519 module_param(txq_number, int, 0444); 5520 5521 module_param(rxq_def, int, 0444); 5522 module_param(rx_copybreak, int, 0644); 5523
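/* Usage note (illustrative, not part of the original sources): when the
 * driver is built as a module, the read-only parameters declared above
 * can still be set at load time, e.g.
 *
 *   modprobe mvneta rxq_number=4 txq_number=4 rxq_def=1
 *
 * while rx_copybreak (0644) can also be changed at runtime through
 * /sys/module/mvneta/parameters/rx_copybreak.
 */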