/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define   MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define   MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define   MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define   MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define   MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define   MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define   MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define   MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define   MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define   MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define   MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define   MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_PORT_CONFIG			0x2400
#define   MVNETA_UNI_PROMISC_MODE		BIT(0)
#define   MVNETA_DEF_RXQ(q)			((q) << 1)
#define   MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define   MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define   MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define   MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define   MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define   MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define   MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)   | \
						 MVNETA_DEF_RXQ_TCP(q)   | \
						 MVNETA_DEF_RXQ_UDP(q)   | \
						 MVNETA_DEF_RXQ_BPDU(q)  | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define   MVNETA_SDMA_BRST_SIZE_16		4
#define   MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define   MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define   MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define   MVNETA_DESC_SWAP			BIT(6)
#define   MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define   MVNETA_TX_IN_PRGRS			BIT(1)
#define   MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define   MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define   MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define   MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define   MVNETA_TXQ_DISABLE_SHIFT		8
#define   MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define   MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define   MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL			(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL			(0xff << 8)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4

#define   MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define   MVNETA_CAUSE_LINK_CHANGE		BIT(1)
#define   MVNETA_CAUSE_PTP			BIT(4)

#define   MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define   MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define   MVNETA_CAUSE_RX_CRC_ERROR		BIT(9)
#define   MVNETA_CAUSE_RX_LARGE_PKT		BIT(10)
#define   MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define   MVNETA_CAUSE_PRBS_ERR			BIT(12)
#define   MVNETA_CAUSE_PSC_SYNC_CHANGE		BIT(13)
#define   MVNETA_CAUSE_SERDES_SYNC_ERR		BIT(14)

#define   MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define   MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define   MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define   MVNETA_CAUSE_TXQ_ERROR_SHIFT		24
#define   MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define   MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE			0x25b8
#define   MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define   MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000  /* note: neta says it's 0x000000FF */

#define MVNETA_RXQ_CMD				0x2680
#define   MVNETA_RXQ_DISABLE_SHIFT		8
#define   MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define   MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define   MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define   MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define   MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define   MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define   MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define   MVNETA_GMAC_LINK_UP			BIT(0)
#define   MVNETA_GMAC_SPEED_1000		BIT(1)
#define   MVNETA_GMAC_SPEED_100			BIT(2)
#define   MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define   MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define   MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define   MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define   MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define   MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define   MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define   MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define   MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define   MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define   MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define   MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define   MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define   MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define   MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define   MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define   MVNETA_TXQ_SENT_DESC_SHIFT		16
#define   MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define   MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define   MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define   MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		1
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
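
/* For example (a sketch of the arithmetic, not quoted from the HW spec):
 * with the 2-byte MH in front of the frame, the 14-byte Ethernet header
 * occupies buffer offsets 2..15, so the IP header starts at offset 16 and
 * is 4-byte aligned; without the MH it would start at the unaligned
 * offset 14.
 */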
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
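
/* Worked example (assuming the standard 1500-byte MTU):
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536 bytes,
 * and MVNETA_RX_BUF_SIZE() additionally reserves NET_SKB_PAD of headroom in
 * front of that packet area.
 */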

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_port {
	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_pcpu_stats *stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
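
/* Note on TX flow control (summarizing the code below): tx_stop_threshold and
 * tx_wake_threshold implement hysteresis - mvneta_tx() stops the netdev queue
 * once count reaches tx_stop_threshold, and mvneta_txq_done() wakes it again
 * once count drops back to tx_wake_threshold.
 */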

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 1;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics */
static struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
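
/* Example of the status-update encoding used above: after processing 64
 * descriptors and refilling 64 buffers, mvneta_rxq_desc_num_update() issues a
 * single write of 64 | (64 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT), i.e. it
 * decrements the occupied count and credits 64 non-occupied descriptors at
 * once.
 */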

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)

{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}
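
/* For instance, assuming a 250 MHz core clock (the actual rate comes from
 * clk_get_rate() below), a time-coalescing value of 100 usec is programmed
 * as (250000000 / 1000000) * 100 = 25000 clock cycles.
 */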

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pp->frag_size);
	else
		return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)

{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
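
/* Illustration of the command word built above for a plain TCP/IPv4 frame
 * (no VLAN, so the IP header starts at offset 14 and ihl = 5):
 * command = (14 << MVNETA_TX_L3_OFF_SHIFT) | (5 << MVNETA_TX_IP_HLEN_SHIFT) |
 *	     MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL
 */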

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		mvneta_frag_free(pp, data);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		u32 rx_status;
		int rx_bytes, err;

		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&pp->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		skb = build_skb(data, pp->frag_size > PAGE_SIZE ?
				0 : pp->frag_size);
		if (!skb)
			goto err_drop_frame;

		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
		     tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}
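
/* Each TSO segment emitted below consumes one header descriptor (carved out
 * of the per-queue tso_hdrs buffer, hence never DMA-unmapped on free) plus at
 * least one data descriptor, which is what MVNETA_MAX_SKB_DESCS
 * (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) accounts for.
 */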

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)

{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute the CRC-8 of the specified address, using an algorithm specific
 * to the hardware (per the HW spec), different from the generic CRC-8
 * algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
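
/* The loop above is a bitwise CRC-8 with polynomial 0x107
 * (x^8 + x^2 + x + 1) computed over all ETH_ALEN address bytes; the result
 * indexes the Other Multicast table in mvneta_mcast_addr_set().
 */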
1867 */ 1868 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 1869 unsigned char last_byte, 1870 int queue) 1871 { 1872 unsigned int smc_table_reg; 1873 unsigned int tbl_offset; 1874 unsigned int reg_offset; 1875 1876 /* Register offset from SMC table base */ 1877 tbl_offset = (last_byte / 4); 1878 /* Entry offset within the above reg */ 1879 reg_offset = last_byte % 4; 1880 1881 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 1882 + tbl_offset * 4)); 1883 1884 if (queue == -1) 1885 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1886 else { 1887 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1888 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1889 } 1890 1891 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 1892 smc_table_reg); 1893 } 1894 1895 /* This method controls the network device Other MAC multicast support. 1896 * The Other Multicast Table is used for multicast of another type. 1897 * A CRC-8 is used as an index to the Other Multicast Table entries 1898 * in the DA-Filter table. 1899 * The method gets the CRC-8 value from the calling routine and 1900 * sets the Other Multicast Table appropriate entry according to the 1901 * specified CRC-8 . 1902 */ 1903 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 1904 unsigned char crc8, 1905 int queue) 1906 { 1907 unsigned int omc_table_reg; 1908 unsigned int tbl_offset; 1909 unsigned int reg_offset; 1910 1911 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 1912 reg_offset = crc8 % 4; /* Entry offset within the above reg */ 1913 1914 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 1915 1916 if (queue == -1) { 1917 /* Clear accepts frame bit at specified Other DA table entry */ 1918 omc_table_reg &= ~(0xff << (8 * reg_offset)); 1919 } else { 1920 omc_table_reg &= ~(0xff << (8 * reg_offset)); 1921 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1922 } 1923 1924 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 1925 } 1926 1927 /* The network device supports multicast using two tables: 1928 * 1) Special Multicast Table for MAC addresses of the form 1929 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 1930 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 1931 * Table entries in the DA-Filter table. 1932 * 2) Other Multicast Table for multicast of another type. A CRC-8 value 1933 * is used as an index to the Other Multicast Table entries in the 1934 * DA-Filter table. 
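* mvneta_mcast_addr_set() below dispatches between the two tables:
* addresses matching the 01:00:5e:00:00:XX prefix go to the Special
* table keyed by their last byte, every other multicast address goes to
* the Other table keyed by its CRC-8, with pp->mcast_count[] reference
* counting how many addresses currently share each CRC-8 bucket.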
1935 */ 1936 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, 1937 int queue) 1938 { 1939 unsigned char crc_result = 0; 1940 1941 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { 1942 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); 1943 return 0; 1944 } 1945 1946 crc_result = mvneta_addr_crc(p_addr); 1947 if (queue == -1) { 1948 if (pp->mcast_count[crc_result] == 0) { 1949 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", 1950 crc_result); 1951 return -EINVAL; 1952 } 1953 1954 pp->mcast_count[crc_result]--; 1955 if (pp->mcast_count[crc_result] != 0) { 1956 netdev_info(pp->dev, 1957 "After delete there are %d valid Mcast for crc8=0x%02x\n", 1958 pp->mcast_count[crc_result], crc_result); 1959 return -EINVAL; 1960 } 1961 } else 1962 pp->mcast_count[crc_result]++; 1963 1964 mvneta_set_other_mcast_addr(pp, crc_result, queue); 1965 1966 return 0; 1967 } 1968 1969 /* Configure Filtering mode of Ethernet port */ 1970 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, 1971 int is_promisc) 1972 { 1973 u32 port_cfg_reg, val; 1974 1975 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); 1976 1977 val = mvreg_read(pp, MVNETA_TYPE_PRIO); 1978 1979 /* Set / Clear UPM bit in port configuration register */ 1980 if (is_promisc) { 1981 /* Accept all Unicast addresses */ 1982 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; 1983 val |= MVNETA_FORCE_UNI; 1984 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); 1985 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); 1986 } else { 1987 /* Reject all Unicast addresses */ 1988 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; 1989 val &= ~MVNETA_FORCE_UNI; 1990 } 1991 1992 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); 1993 mvreg_write(pp, MVNETA_TYPE_PRIO, val); 1994 } 1995 1996 /* register unicast and multicast addresses */ 1997 static void mvneta_set_rx_mode(struct net_device *dev) 1998 { 1999 struct mvneta_port *pp = netdev_priv(dev); 2000 struct netdev_hw_addr *ha; 2001 2002 if (dev->flags & IFF_PROMISC) { 2003 /* Accept all: Multicast + Unicast */ 2004 mvneta_rx_unicast_promisc_set(pp, 1); 2005 mvneta_set_ucast_table(pp, rxq_def); 2006 mvneta_set_special_mcast_table(pp, rxq_def); 2007 mvneta_set_other_mcast_table(pp, rxq_def); 2008 } else { 2009 /* Accept single Unicast */ 2010 mvneta_rx_unicast_promisc_set(pp, 0); 2011 mvneta_set_ucast_table(pp, -1); 2012 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); 2013 2014 if (dev->flags & IFF_ALLMULTI) { 2015 /* Accept all multicast */ 2016 mvneta_set_special_mcast_table(pp, rxq_def); 2017 mvneta_set_other_mcast_table(pp, rxq_def); 2018 } else { 2019 /* Accept only initialized multicast */ 2020 mvneta_set_special_mcast_table(pp, -1); 2021 mvneta_set_other_mcast_table(pp, -1); 2022 2023 if (!netdev_mc_empty(dev)) { 2024 netdev_for_each_mc_addr(ha, dev) { 2025 mvneta_mcast_addr_set(pp, ha->addr, 2026 rxq_def); 2027 } 2028 } 2029 } 2030 } 2031 } 2032 2033 /* Interrupt handling - the callback for request_irq() */ 2034 static irqreturn_t mvneta_isr(int irq, void *dev_id) 2035 { 2036 struct mvneta_port *pp = (struct mvneta_port *)dev_id; 2037 2038 /* Mask all interrupts */ 2039 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2040 2041 napi_schedule(&pp->napi); 2042 2043 return IRQ_HANDLED; 2044 } 2045 2046 /* NAPI handler 2047 * Bits 0 - 7 of the causeRxTx register indicate that packets were transmitted 2048 on the corresponding TXQ (Bit 0 is for TX queue 0).
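* The TX bits are serviced first in mvneta_poll(), which calls
* mvneta_tx_done_gbe() and clears them from the local cause value
* before any RX budget is spent.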
2049 * Bits 8 - 15 of the causeRxTx register indicate that packets were received 2050 on the corresponding RXQ (Bit 8 is for RX queue 0). 2051 * Each CPU has its own causeRxTx register 2052 */ 2053 static int mvneta_poll(struct napi_struct *napi, int budget) 2054 { 2055 int rx_done = 0; 2056 u32 cause_rx_tx; 2057 unsigned long flags; 2058 struct mvneta_port *pp = netdev_priv(napi->dev); 2059 2060 if (!netif_running(pp->dev)) { 2061 napi_complete(napi); 2062 return rx_done; 2063 } 2064 2065 /* Read cause register */ 2066 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & 2067 (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2068 2069 /* Release Tx descriptors */ 2070 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 2071 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); 2072 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; 2073 } 2074 2075 /* For the case where the last mvneta_poll did not process all 2076 * RX packets 2077 */ 2078 cause_rx_tx |= pp->cause_rx_tx; 2079 if (rxq_number > 1) { 2080 while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) { 2081 int count; 2082 struct mvneta_rx_queue *rxq; 2083 /* get rx queue number from cause_rx_tx */ 2084 rxq = mvneta_rx_policy(pp, cause_rx_tx); 2085 if (!rxq) 2086 break; 2087 2088 /* process the packet in that rx queue */ 2089 count = mvneta_rx(pp, budget, rxq); 2090 rx_done += count; 2091 budget -= count; 2092 if (budget > 0) { 2093 /* clear the rx bit of this 2094 * queue in the cause rx 2095 * tx register, so that the next iteration 2096 * will find the next rx queue where 2097 * packets were received 2098 */ 2099 cause_rx_tx &= ~((1 << rxq->id) << 8); 2100 } 2101 } 2102 } else { 2103 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); 2104 budget -= rx_done; 2105 } 2106 2107 if (budget > 0) { 2108 cause_rx_tx = 0; 2109 napi_complete(napi); 2110 local_irq_save(flags); 2111 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2112 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2113 local_irq_restore(flags); 2114 } 2115 2116 pp->cause_rx_tx = cause_rx_tx; 2117 return rx_done; 2118 } 2119 2120 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 2121 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 2122 int num) 2123 { 2124 int i; 2125 2126 for (i = 0; i < num; i++) { 2127 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); 2128 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { 2129 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", 2130 __func__, rxq->id, i, num); 2131 break; 2132 } 2133 } 2134 2135 /* Add this number of RX descriptors as non occupied (ready to 2136 * get packets) 2137 */ 2138 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 2139 2140 return i; 2141 } 2142 2143 /* Free all packets pending transmit from all TXQs and reset TX port */ 2144 static void mvneta_tx_reset(struct mvneta_port *pp) 2145 { 2146 int queue; 2147 2148 /* free the skb's in the tx ring */ 2149 for (queue = 0; queue < txq_number; queue++) 2150 mvneta_txq_done_force(pp, &pp->txqs[queue]); 2151 2152 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 2153 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 2154 } 2155 2156 static void mvneta_rx_reset(struct mvneta_port *pp) 2157 { 2158 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 2159 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 2160 } 2161 2162 /* Rx/Tx queue initialization/cleanup methods */ 2163 2164 /* Create a specified RX queue */ 2165 static int mvneta_rxq_init(struct
mvneta_port *pp, 2166 struct mvneta_rx_queue *rxq) 2167 2168 { 2169 rxq->size = pp->rx_ring_size; 2170 2171 /* Allocate memory for RX descriptors */ 2172 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2173 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 2174 &rxq->descs_phys, GFP_KERNEL); 2175 if (rxq->descs == NULL) 2176 return -ENOMEM; 2177 2178 BUG_ON(rxq->descs != 2179 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 2180 2181 rxq->last_desc = rxq->size - 1; 2182 2183 /* Set Rx descriptors queue starting address */ 2184 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 2185 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 2186 2187 /* Set Offset */ 2188 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); 2189 2190 /* Set coalescing pkts and time */ 2191 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 2192 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 2193 2194 /* Fill RXQ with buffers from RX pool */ 2195 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); 2196 mvneta_rxq_bm_disable(pp, rxq); 2197 mvneta_rxq_fill(pp, rxq, rxq->size); 2198 2199 return 0; 2200 } 2201 2202 /* Cleanup Rx queue */ 2203 static void mvneta_rxq_deinit(struct mvneta_port *pp, 2204 struct mvneta_rx_queue *rxq) 2205 { 2206 mvneta_rxq_drop_pkts(pp, rxq); 2207 2208 if (rxq->descs) 2209 dma_free_coherent(pp->dev->dev.parent, 2210 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 2211 rxq->descs, 2212 rxq->descs_phys); 2213 2214 rxq->descs = NULL; 2215 rxq->last_desc = 0; 2216 rxq->next_desc_to_proc = 0; 2217 rxq->descs_phys = 0; 2218 } 2219 2220 /* Create and initialize a tx queue */ 2221 static int mvneta_txq_init(struct mvneta_port *pp, 2222 struct mvneta_tx_queue *txq) 2223 { 2224 txq->size = pp->tx_ring_size; 2225 2226 /* A queue must always have room for at least one skb. 2227 * Therefore, stop the queue when the free entries reaches 2228 * the maximum number of descriptors per skb. 
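* With the sizes used here tx_stop_threshold ends up as
* size - MVNETA_MAX_SKB_DESCS, and tx_wake_threshold (half of that,
* set just below) is the level used when deciding to restart a
* stopped queue after transmit completions.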
2229 */ 2230 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; 2231 txq->tx_wake_threshold = txq->tx_stop_threshold / 2; 2232 2233 2234 /* Allocate memory for TX descriptors */ 2235 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2236 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2237 &txq->descs_phys, GFP_KERNEL); 2238 if (txq->descs == NULL) 2239 return -ENOMEM; 2240 2241 /* Make sure descriptor address is cache line size aligned */ 2242 BUG_ON(txq->descs != 2243 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 2244 2245 txq->last_desc = txq->size - 1; 2246 2247 /* Set maximum bandwidth for enabled TXQs */ 2248 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 2249 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 2250 2251 /* Set Tx descriptors queue starting address */ 2252 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 2253 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 2254 2255 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); 2256 if (txq->tx_skb == NULL) { 2257 dma_free_coherent(pp->dev->dev.parent, 2258 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2259 txq->descs, txq->descs_phys); 2260 return -ENOMEM; 2261 } 2262 2263 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ 2264 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, 2265 txq->size * TSO_HEADER_SIZE, 2266 &txq->tso_hdrs_phys, GFP_KERNEL); 2267 if (txq->tso_hdrs == NULL) { 2268 kfree(txq->tx_skb); 2269 dma_free_coherent(pp->dev->dev.parent, 2270 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2271 txq->descs, txq->descs_phys); 2272 return -ENOMEM; 2273 } 2274 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2275 2276 return 0; 2277 } 2278 2279 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 2280 static void mvneta_txq_deinit(struct mvneta_port *pp, 2281 struct mvneta_tx_queue *txq) 2282 { 2283 kfree(txq->tx_skb); 2284 2285 if (txq->tso_hdrs) 2286 dma_free_coherent(pp->dev->dev.parent, 2287 txq->size * TSO_HEADER_SIZE, 2288 txq->tso_hdrs, txq->tso_hdrs_phys); 2289 if (txq->descs) 2290 dma_free_coherent(pp->dev->dev.parent, 2291 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2292 txq->descs, txq->descs_phys); 2293 2294 txq->descs = NULL; 2295 txq->last_desc = 0; 2296 txq->next_desc_to_proc = 0; 2297 txq->descs_phys = 0; 2298 2299 /* Set minimum bandwidth for disabled TXQs */ 2300 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 2301 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 2302 2303 /* Set Tx descriptors queue starting address and size */ 2304 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 2305 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 2306 } 2307 2308 /* Cleanup all Tx queues */ 2309 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 2310 { 2311 int queue; 2312 2313 for (queue = 0; queue < txq_number; queue++) 2314 mvneta_txq_deinit(pp, &pp->txqs[queue]); 2315 } 2316 2317 /* Cleanup all Rx queues */ 2318 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 2319 { 2320 int queue; 2321 2322 for (queue = 0; queue < rxq_number; queue++) 2323 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 2324 } 2325 2326 2327 /* Init all Rx queues */ 2328 static int mvneta_setup_rxqs(struct mvneta_port *pp) 2329 { 2330 int queue; 2331 2332 for (queue = 0; queue < rxq_number; queue++) { 2333 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 2334 if (err) { 2335 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 2336 __func__, queue); 2337 mvneta_cleanup_rxqs(pp); 2338 return err; 2339 } 2340 } 2341 
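/* All RX queues were created and pre-filled with receive buffers by
 * mvneta_rxq_init() at this point.
 */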
2342 return 0; 2343 } 2344 2345 /* Init all tx queues */ 2346 static int mvneta_setup_txqs(struct mvneta_port *pp) 2347 { 2348 int queue; 2349 2350 for (queue = 0; queue < txq_number; queue++) { 2351 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 2352 if (err) { 2353 netdev_err(pp->dev, "%s: can't create txq=%d\n", 2354 __func__, queue); 2355 mvneta_cleanup_txqs(pp); 2356 return err; 2357 } 2358 } 2359 2360 return 0; 2361 } 2362 2363 static void mvneta_start_dev(struct mvneta_port *pp) 2364 { 2365 mvneta_max_rx_size_set(pp, pp->pkt_size); 2366 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2367 2368 /* start the Rx/Tx activity */ 2369 mvneta_port_enable(pp); 2370 2371 /* Enable polling on the port */ 2372 napi_enable(&pp->napi); 2373 2374 /* Unmask interrupts */ 2375 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2376 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2377 2378 phy_start(pp->phy_dev); 2379 netif_tx_start_all_queues(pp->dev); 2380 } 2381 2382 static void mvneta_stop_dev(struct mvneta_port *pp) 2383 { 2384 phy_stop(pp->phy_dev); 2385 2386 napi_disable(&pp->napi); 2387 2388 netif_carrier_off(pp->dev); 2389 2390 mvneta_port_down(pp); 2391 netif_tx_stop_all_queues(pp->dev); 2392 2393 /* Stop the port activity */ 2394 mvneta_port_disable(pp); 2395 2396 /* Clear all ethernet port interrupts */ 2397 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2398 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 2399 2400 /* Mask all ethernet port interrupts */ 2401 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2402 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2403 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 2404 2405 mvneta_tx_reset(pp); 2406 mvneta_rx_reset(pp); 2407 } 2408 2409 /* Return positive if MTU is valid */ 2410 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) 2411 { 2412 if (mtu < 68) { 2413 netdev_err(dev, "cannot change mtu to less than 68\n"); 2414 return -EINVAL; 2415 } 2416 2417 /* 9676 == 9700 - 20 and rounding to 8 */ 2418 if (mtu > 9676) { 2419 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu); 2420 mtu = 9676; 2421 } 2422 2423 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 2424 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 2425 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 2426 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 2427 } 2428 2429 return mtu; 2430 } 2431 2432 /* Change the device mtu */ 2433 static int mvneta_change_mtu(struct net_device *dev, int mtu) 2434 { 2435 struct mvneta_port *pp = netdev_priv(dev); 2436 int ret; 2437 2438 mtu = mvneta_check_mtu_valid(dev, mtu); 2439 if (mtu < 0) 2440 return -EINVAL; 2441 2442 dev->mtu = mtu; 2443 2444 if (!netif_running(dev)) 2445 return 0; 2446 2447 /* The interface is running, so we have to force a 2448 * reallocation of the queues 2449 */ 2450 mvneta_stop_dev(pp); 2451 2452 mvneta_cleanup_txqs(pp); 2453 mvneta_cleanup_rxqs(pp); 2454 2455 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); 2456 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2457 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2458 2459 ret = mvneta_setup_rxqs(pp); 2460 if (ret) { 2461 netdev_err(dev, "unable to setup rxqs after MTU change\n"); 2462 return ret; 2463 } 2464 2465 ret = mvneta_setup_txqs(pp); 2466 if (ret) { 2467 netdev_err(dev, "unable to setup txqs after MTU change\n"); 2468 return ret; 2469 } 2470 2471 mvneta_start_dev(pp); 2472 mvneta_port_up(pp); 2473 2474 return 0; 2475 } 2476 2477 /* Get mac address */ 2478 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 2479 { 2480 
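/* The six address bytes are split across two registers: bytes 0-3 sit in
 * MVNETA_MAC_ADDR_HIGH (byte 0 in the top bits) and bytes 4-5 in the low
 * 16 bits of MVNETA_MAC_ADDR_LOW. Illustrative example: for the address
 * 00:50:43:02:02:02, MAC_ADDR_HIGH would read 0x00504302 and the low
 * half of MAC_ADDR_LOW 0x0202.
 */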
u32 mac_addr_l, mac_addr_h; 2481 2482 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 2483 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 2484 addr[0] = (mac_addr_h >> 24) & 0xFF; 2485 addr[1] = (mac_addr_h >> 16) & 0xFF; 2486 addr[2] = (mac_addr_h >> 8) & 0xFF; 2487 addr[3] = mac_addr_h & 0xFF; 2488 addr[4] = (mac_addr_l >> 8) & 0xFF; 2489 addr[5] = mac_addr_l & 0xFF; 2490 } 2491 2492 /* Handle setting mac address */ 2493 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2494 { 2495 struct mvneta_port *pp = netdev_priv(dev); 2496 struct sockaddr *sockaddr = addr; 2497 int ret; 2498 2499 ret = eth_prepare_mac_addr_change(dev, addr); 2500 if (ret < 0) 2501 return ret; 2502 /* Remove previous address table entry */ 2503 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2504 2505 /* Set new addr in hw */ 2506 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); 2507 2508 eth_commit_mac_addr_change(dev, addr); 2509 return 0; 2510 } 2511 2512 static void mvneta_adjust_link(struct net_device *ndev) 2513 { 2514 struct mvneta_port *pp = netdev_priv(ndev); 2515 struct phy_device *phydev = pp->phy_dev; 2516 int status_change = 0; 2517 2518 if (phydev->link) { 2519 if ((pp->speed != phydev->speed) || 2520 (pp->duplex != phydev->duplex)) { 2521 u32 val; 2522 2523 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2524 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2525 MVNETA_GMAC_CONFIG_GMII_SPEED | 2526 MVNETA_GMAC_CONFIG_FULL_DUPLEX | 2527 MVNETA_GMAC_AN_SPEED_EN | 2528 MVNETA_GMAC_AN_DUPLEX_EN); 2529 2530 if (phydev->duplex) 2531 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2532 2533 if (phydev->speed == SPEED_1000) 2534 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 2535 else if (phydev->speed == SPEED_100) 2536 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2537 2538 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2539 2540 pp->duplex = phydev->duplex; 2541 pp->speed = phydev->speed; 2542 } 2543 } 2544 2545 if (phydev->link != pp->link) { 2546 if (!phydev->link) { 2547 pp->duplex = -1; 2548 pp->speed = 0; 2549 } 2550 2551 pp->link = phydev->link; 2552 status_change = 1; 2553 } 2554 2555 if (status_change) { 2556 if (phydev->link) { 2557 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2558 val |= (MVNETA_GMAC_FORCE_LINK_PASS | 2559 MVNETA_GMAC_FORCE_LINK_DOWN); 2560 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2561 mvneta_port_up(pp); 2562 } else { 2563 mvneta_port_down(pp); 2564 } 2565 phy_print_status(phydev); 2566 } 2567 } 2568 2569 static int mvneta_mdio_probe(struct mvneta_port *pp) 2570 { 2571 struct phy_device *phy_dev; 2572 2573 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, 2574 pp->phy_interface); 2575 if (!phy_dev) { 2576 netdev_err(pp->dev, "could not find the PHY\n"); 2577 return -ENODEV; 2578 } 2579 2580 phy_dev->supported &= PHY_GBIT_FEATURES; 2581 phy_dev->advertising = phy_dev->supported; 2582 2583 pp->phy_dev = phy_dev; 2584 pp->link = 0; 2585 pp->duplex = 0; 2586 pp->speed = 0; 2587 2588 return 0; 2589 } 2590 2591 static void mvneta_mdio_remove(struct mvneta_port *pp) 2592 { 2593 phy_disconnect(pp->phy_dev); 2594 pp->phy_dev = NULL; 2595 } 2596 2597 static int mvneta_open(struct net_device *dev) 2598 { 2599 struct mvneta_port *pp = netdev_priv(dev); 2600 int ret; 2601 2602 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2603 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 2604 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2605 2606 ret = mvneta_setup_rxqs(pp); 2607 if (ret) 2608 return ret; 2609 2610 ret = mvneta_setup_txqs(pp); 2611 
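/* A failure here must also unwind the RX queues that were just set up */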
if (ret) 2612 goto err_cleanup_rxqs; 2613 2614 /* Connect to port interrupt line */ 2615 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 2616 MVNETA_DRIVER_NAME, pp); 2617 if (ret) { 2618 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 2619 goto err_cleanup_txqs; 2620 } 2621 2622 /* By default, the link is down */ 2623 netif_carrier_off(pp->dev); 2624 2625 ret = mvneta_mdio_probe(pp); 2626 if (ret < 0) { 2627 netdev_err(dev, "cannot probe MDIO bus\n"); 2628 goto err_free_irq; 2629 } 2630 2631 mvneta_start_dev(pp); 2632 2633 return 0; 2634 2635 err_free_irq: 2636 free_irq(pp->dev->irq, pp); 2637 err_cleanup_txqs: 2638 mvneta_cleanup_txqs(pp); 2639 err_cleanup_rxqs: 2640 mvneta_cleanup_rxqs(pp); 2641 return ret; 2642 } 2643 2644 /* Stop the port, free port interrupt line */ 2645 static int mvneta_stop(struct net_device *dev) 2646 { 2647 struct mvneta_port *pp = netdev_priv(dev); 2648 2649 mvneta_stop_dev(pp); 2650 mvneta_mdio_remove(pp); 2651 free_irq(dev->irq, pp); 2652 mvneta_cleanup_rxqs(pp); 2653 mvneta_cleanup_txqs(pp); 2654 2655 return 0; 2656 } 2657 2658 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2659 { 2660 struct mvneta_port *pp = netdev_priv(dev); 2661 int ret; 2662 2663 if (!pp->phy_dev) 2664 return -ENOTSUPP; 2665 2666 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); 2667 if (!ret) 2668 mvneta_adjust_link(dev); 2669 2670 return ret; 2671 } 2672 2673 /* Ethtool methods */ 2674 2675 /* Get settings (phy address, speed) for ethtool */ 2676 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2677 { 2678 struct mvneta_port *pp = netdev_priv(dev); 2679 2680 if (!pp->phy_dev) 2681 return -ENODEV; 2682 2683 return phy_ethtool_gset(pp->phy_dev, cmd); 2684 } 2685 2686 /* Set settings (phy address, speed) for ethtool */ 2687 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2688 { 2689 struct mvneta_port *pp = netdev_priv(dev); 2690 2691 if (!pp->phy_dev) 2692 return -ENODEV; 2693 2694 return phy_ethtool_sset(pp->phy_dev, cmd); 2695 } 2696 2697 /* Set interrupt coalescing for ethtool */ 2698 static int mvneta_ethtool_set_coalesce(struct net_device *dev, 2699 struct ethtool_coalesce *c) 2700 { 2701 struct mvneta_port *pp = netdev_priv(dev); 2702 int queue; 2703 2704 for (queue = 0; queue < rxq_number; queue++) { 2705 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 2706 rxq->time_coal = c->rx_coalesce_usecs; 2707 rxq->pkts_coal = c->rx_max_coalesced_frames; 2708 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 2709 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 2710 } 2711 2712 for (queue = 0; queue < txq_number; queue++) { 2713 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 2714 txq->done_pkts_coal = c->tx_max_coalesced_frames; 2715 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2716 } 2717 2718 return 0; 2719 } 2720 2721 /* Get interrupt coalescing for ethtool */ 2722 static int mvneta_ethtool_get_coalesce(struct net_device *dev, 2723 struct ethtool_coalesce *c) 2724 { 2725 struct mvneta_port *pp = netdev_priv(dev); 2726 2727 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 2728 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 2729 2730 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 2731 return 0; 2732 } 2733 2734 2735 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 2736 struct ethtool_drvinfo *drvinfo) 2737 { 2738 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 2739 sizeof(drvinfo->driver)); 2740 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 2741
sizeof(drvinfo->version)); 2742 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 2743 sizeof(drvinfo->bus_info)); 2744 } 2745 2746 2747 static void mvneta_ethtool_get_ringparam(struct net_device *netdev, 2748 struct ethtool_ringparam *ring) 2749 { 2750 struct mvneta_port *pp = netdev_priv(netdev); 2751 2752 ring->rx_max_pending = MVNETA_MAX_RXD; 2753 ring->tx_max_pending = MVNETA_MAX_TXD; 2754 ring->rx_pending = pp->rx_ring_size; 2755 ring->tx_pending = pp->tx_ring_size; 2756 } 2757 2758 static int mvneta_ethtool_set_ringparam(struct net_device *dev, 2759 struct ethtool_ringparam *ring) 2760 { 2761 struct mvneta_port *pp = netdev_priv(dev); 2762 2763 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 2764 return -EINVAL; 2765 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 2766 ring->rx_pending : MVNETA_MAX_RXD; 2767 2768 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, 2769 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); 2770 if (pp->tx_ring_size != ring->tx_pending) 2771 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", 2772 pp->tx_ring_size, ring->tx_pending); 2773 2774 if (netif_running(dev)) { 2775 mvneta_stop(dev); 2776 if (mvneta_open(dev)) { 2777 netdev_err(dev, 2778 "error on opening device after ring param change\n"); 2779 return -ENOMEM; 2780 } 2781 } 2782 2783 return 0; 2784 } 2785 2786 static const struct net_device_ops mvneta_netdev_ops = { 2787 .ndo_open = mvneta_open, 2788 .ndo_stop = mvneta_stop, 2789 .ndo_start_xmit = mvneta_tx, 2790 .ndo_set_rx_mode = mvneta_set_rx_mode, 2791 .ndo_set_mac_address = mvneta_set_mac_addr, 2792 .ndo_change_mtu = mvneta_change_mtu, 2793 .ndo_get_stats64 = mvneta_get_stats64, 2794 .ndo_do_ioctl = mvneta_ioctl, 2795 }; 2796 2797 const struct ethtool_ops mvneta_eth_tool_ops = { 2798 .get_link = ethtool_op_get_link, 2799 .get_settings = mvneta_ethtool_get_settings, 2800 .set_settings = mvneta_ethtool_set_settings, 2801 .set_coalesce = mvneta_ethtool_set_coalesce, 2802 .get_coalesce = mvneta_ethtool_get_coalesce, 2803 .get_drvinfo = mvneta_ethtool_get_drvinfo, 2804 .get_ringparam = mvneta_ethtool_get_ringparam, 2805 .set_ringparam = mvneta_ethtool_set_ringparam, 2806 }; 2807 2808 /* Initialize hw */ 2809 static int mvneta_init(struct device *dev, struct mvneta_port *pp) 2810 { 2811 int queue; 2812 2813 /* Disable port */ 2814 mvneta_port_disable(pp); 2815 2816 /* Set port default values */ 2817 mvneta_defaults_set(pp); 2818 2819 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), 2820 GFP_KERNEL); 2821 if (!pp->txqs) 2822 return -ENOMEM; 2823 2824 /* Initialize TX descriptor rings */ 2825 for (queue = 0; queue < txq_number; queue++) { 2826 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 2827 txq->id = queue; 2828 txq->size = pp->tx_ring_size; 2829 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 2830 } 2831 2832 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), 2833 GFP_KERNEL); 2834 if (!pp->rxqs) 2835 return -ENOMEM; 2836 2837 /* Create Rx descriptor rings */ 2838 for (queue = 0; queue < rxq_number; queue++) { 2839 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 2840 rxq->id = queue; 2841 rxq->size = pp->rx_ring_size; 2842 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 2843 rxq->time_coal = MVNETA_RX_COAL_USEC; 2844 } 2845 2846 return 0; 2847 } 2848 2849 /* platform glue : initialize decoding windows */ 2850 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 2851 const struct mbus_dram_target_info *dram) 2852 { 2853 u32 win_enable; 2854 u32 win_protect; 2855 int i; 2856 2857 for (i = 
0; i < 6; i++) { 2858 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 2859 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 2860 2861 if (i < 4) 2862 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 2863 } 2864 2865 win_enable = 0x3f; 2866 win_protect = 0; 2867 2868 for (i = 0; i < dram->num_cs; i++) { 2869 const struct mbus_dram_window *cs = dram->cs + i; 2870 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | 2871 (cs->mbus_attr << 8) | dram->mbus_dram_target_id); 2872 2873 mvreg_write(pp, MVNETA_WIN_SIZE(i), 2874 (cs->size - 1) & 0xffff0000); 2875 2876 win_enable &= ~(1 << i); 2877 win_protect |= 3 << (2 * i); 2878 } 2879 2880 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 2881 } 2882 2883 /* Power up the port */ 2884 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 2885 { 2886 u32 ctrl; 2887 2888 /* MAC Cause register should be cleared */ 2889 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 2890 2891 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 2892 2893 /* Even though it might look weird, when we're configured in 2894 * SGMII or QSGMII mode, the RGMII bit needs to be set. 2895 */ 2896 switch(phy_mode) { 2897 case PHY_INTERFACE_MODE_QSGMII: 2898 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); 2899 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; 2900 break; 2901 case PHY_INTERFACE_MODE_SGMII: 2902 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); 2903 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; 2904 break; 2905 case PHY_INTERFACE_MODE_RGMII: 2906 case PHY_INTERFACE_MODE_RGMII_ID: 2907 ctrl |= MVNETA_GMAC2_PORT_RGMII; 2908 break; 2909 default: 2910 return -EINVAL; 2911 } 2912 2913 /* Cancel Port Reset */ 2914 ctrl &= ~MVNETA_GMAC2_PORT_RESET; 2915 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); 2916 2917 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 2918 MVNETA_GMAC2_PORT_RESET) != 0) 2919 continue; 2920 2921 return 0; 2922 } 2923 2924 /* Device initialization routine */ 2925 static int mvneta_probe(struct platform_device *pdev) 2926 { 2927 const struct mbus_dram_target_info *dram_target_info; 2928 struct resource *res; 2929 struct device_node *dn = pdev->dev.of_node; 2930 struct device_node *phy_node; 2931 struct mvneta_port *pp; 2932 struct net_device *dev; 2933 const char *dt_mac_addr; 2934 char hw_mac_addr[ETH_ALEN]; 2935 const char *mac_from; 2936 int phy_mode; 2937 int err; 2938 2939 /* Our multiqueue support is not complete, so for now, only 2940 * allow the usage of the first RX queue 2941 */ 2942 if (rxq_def != 0) { 2943 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def); 2944 return -EINVAL; 2945 } 2946 2947 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); 2948 if (!dev) 2949 return -ENOMEM; 2950 2951 dev->irq = irq_of_parse_and_map(dn, 0); 2952 if (dev->irq == 0) { 2953 err = -EINVAL; 2954 goto err_free_netdev; 2955 } 2956 2957 phy_node = of_parse_phandle(dn, "phy", 0); 2958 if (!phy_node) { 2959 if (!of_phy_is_fixed_link(dn)) { 2960 dev_err(&pdev->dev, "no PHY specified\n"); 2961 err = -ENODEV; 2962 goto err_free_irq; 2963 } 2964 2965 err = of_phy_register_fixed_link(dn); 2966 if (err < 0) { 2967 dev_err(&pdev->dev, "cannot register fixed PHY\n"); 2968 goto err_free_irq; 2969 } 2970 2971 /* In the case of a fixed PHY, the DT node associated 2972 * to the PHY is the Ethernet MAC DT node. 
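* Hence the of_node_get(dn) below: it takes an extra reference on the
* MAC node itself, which is dropped again by of_node_put() on the error
* path and in mvneta_remove().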
2973 */ 2974 phy_node = of_node_get(dn); 2975 } 2976 2977 phy_mode = of_get_phy_mode(dn); 2978 if (phy_mode < 0) { 2979 dev_err(&pdev->dev, "incorrect phy-mode\n"); 2980 err = -EINVAL; 2981 goto err_put_phy_node; 2982 } 2983 2984 dev->tx_queue_len = MVNETA_MAX_TXD; 2985 dev->watchdog_timeo = 5 * HZ; 2986 dev->netdev_ops = &mvneta_netdev_ops; 2987 2988 dev->ethtool_ops = &mvneta_eth_tool_ops; 2989 2990 pp = netdev_priv(dev); 2991 pp->phy_node = phy_node; 2992 pp->phy_interface = phy_mode; 2993 2994 pp->clk = devm_clk_get(&pdev->dev, NULL); 2995 if (IS_ERR(pp->clk)) { 2996 err = PTR_ERR(pp->clk); 2997 goto err_put_phy_node; 2998 } 2999 3000 clk_prepare_enable(pp->clk); 3001 3002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3003 pp->base = devm_ioremap_resource(&pdev->dev, res); 3004 if (IS_ERR(pp->base)) { 3005 err = PTR_ERR(pp->base); 3006 goto err_clk; 3007 } 3008 3009 /* Alloc per-cpu stats */ 3010 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); 3011 if (!pp->stats) { 3012 err = -ENOMEM; 3013 goto err_clk; 3014 } 3015 3016 dt_mac_addr = of_get_mac_address(dn); 3017 if (dt_mac_addr) { 3018 mac_from = "device tree"; 3019 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); 3020 } else { 3021 mvneta_get_mac_addr(pp, hw_mac_addr); 3022 if (is_valid_ether_addr(hw_mac_addr)) { 3023 mac_from = "hardware"; 3024 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); 3025 } else { 3026 mac_from = "random"; 3027 eth_hw_addr_random(dev); 3028 } 3029 } 3030 3031 pp->tx_ring_size = MVNETA_MAX_TXD; 3032 pp->rx_ring_size = MVNETA_MAX_RXD; 3033 3034 pp->dev = dev; 3035 SET_NETDEV_DEV(dev, &pdev->dev); 3036 3037 err = mvneta_init(&pdev->dev, pp); 3038 if (err < 0) 3039 goto err_free_stats; 3040 3041 err = mvneta_port_power_up(pp, phy_mode); 3042 if (err < 0) { 3043 dev_err(&pdev->dev, "can't power up port\n"); 3044 goto err_free_stats; 3045 } 3046 3047 dram_target_info = mv_mbus_dram_info(); 3048 if (dram_target_info) 3049 mvneta_conf_mbus_windows(pp, dram_target_info); 3050 3051 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); 3052 3053 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 3054 dev->hw_features |= dev->features; 3055 dev->vlan_features |= dev->features; 3056 dev->priv_flags |= IFF_UNICAST_FLT; 3057 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 3058 3059 err = register_netdev(dev); 3060 if (err < 0) { 3061 dev_err(&pdev->dev, "failed to register\n"); 3062 goto err_free_stats; 3063 } 3064 3065 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 3066 dev->dev_addr); 3067 3068 platform_set_drvdata(pdev, pp->dev); 3069 3070 return 0; 3071 3072 err_free_stats: 3073 free_percpu(pp->stats); 3074 err_clk: 3075 clk_disable_unprepare(pp->clk); 3076 err_put_phy_node: 3077 of_node_put(phy_node); 3078 err_free_irq: 3079 irq_dispose_mapping(dev->irq); 3080 err_free_netdev: 3081 free_netdev(dev); 3082 return err; 3083 } 3084 3085 /* Device removal routine */ 3086 static int mvneta_remove(struct platform_device *pdev) 3087 { 3088 struct net_device *dev = platform_get_drvdata(pdev); 3089 struct mvneta_port *pp = netdev_priv(dev); 3090 3091 unregister_netdev(dev); 3092 clk_disable_unprepare(pp->clk); 3093 free_percpu(pp->stats); 3094 irq_dispose_mapping(dev->irq); 3095 of_node_put(pp->phy_node); 3096 free_netdev(dev); 3097 3098 return 0; 3099 } 3100 3101 static const struct of_device_id mvneta_match[] = { 3102 { .compatible = "marvell,armada-370-neta" }, 3103 { } 3104 }; 3105 MODULE_DEVICE_TABLE(of, mvneta_match); 3106 3107 static struct platform_driver mvneta_driver = { 3108 
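/* Bound via device tree only, using the "marvell,armada-370-neta"
 * compatible entry in mvneta_match above.
 */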
.probe = mvneta_probe, 3109 .remove = mvneta_remove, 3110 .driver = { 3111 .name = MVNETA_DRIVER_NAME, 3112 .of_match_table = mvneta_match, 3113 }, 3114 }; 3115 3116 module_platform_driver(mvneta_driver); 3117 3118 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 3119 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 3120 MODULE_LICENSE("GPL"); 3121 3122 module_param(rxq_number, int, S_IRUGO); 3123 module_param(txq_number, int, S_IRUGO); 3124 3125 module_param(rxq_def, int, S_IRUGO); 3126 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); 3127
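/* Usage sketch (hypothetical values, not from this file): the module
 * parameters declared above can be set at load time, e.g.
 *
 *   modprobe mvneta rxq_def=0 rx_copybreak=256
 *
 * Note that mvneta_probe() currently rejects any rxq_def other than 0
 * while multiqueue RX support is incomplete, and rx_copybreak is the
 * only parameter left writable at runtime (S_IWUSR).
 */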