1 /* 2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Rami Rosen <rosenr@marvell.com> 7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 8 * 9 * This file is licensed under the terms of the GNU General Public 10 * License version 2. This program is licensed "as is" without any 11 * warranty of any kind, whether express or implied. 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/netdevice.h> 16 #include <linux/etherdevice.h> 17 #include <linux/platform_device.h> 18 #include <linux/skbuff.h> 19 #include <linux/inetdevice.h> 20 #include <linux/mbus.h> 21 #include <linux/module.h> 22 #include <linux/interrupt.h> 23 #include <net/ip.h> 24 #include <net/ipv6.h> 25 #include <linux/io.h> 26 #include <linux/of.h> 27 #include <linux/of_irq.h> 28 #include <linux/of_mdio.h> 29 #include <linux/of_net.h> 30 #include <linux/of_address.h> 31 #include <linux/phy.h> 32 #include <linux/clk.h> 33 34 /* Registers */ 35 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 36 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) 37 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) 38 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) 39 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) 40 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) 41 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) 42 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) 43 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 44 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) 45 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) 46 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff 47 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) 48 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 49 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 50 #define MVNETA_PORT_RX_RESET 0x1cc0 51 #define MVNETA_PORT_RX_DMA_RESET BIT(0) 52 #define MVNETA_PHY_ADDR 0x2000 53 #define MVNETA_PHY_ADDR_MASK 0x1f 54 #define MVNETA_MBUS_RETRY 0x2010 55 #define MVNETA_UNIT_INTR_CAUSE 0x2080 56 #define MVNETA_UNIT_CONTROL 0x20B0 57 #define MVNETA_PHY_POLLING_ENABLE BIT(1) 58 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) 59 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) 60 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) 61 #define MVNETA_BASE_ADDR_ENABLE 0x2290 62 #define MVNETA_PORT_CONFIG 0x2400 63 #define MVNETA_UNI_PROMISC_MODE BIT(0) 64 #define MVNETA_DEF_RXQ(q) ((q) << 1) 65 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) 66 #define MVNETA_TX_UNSET_ERR_SUM BIT(12) 67 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) 68 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) 69 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) 70 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) 71 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ 72 MVNETA_DEF_RXQ_ARP(q) | \ 73 MVNETA_DEF_RXQ_TCP(q) | \ 74 MVNETA_DEF_RXQ_UDP(q) | \ 75 MVNETA_DEF_RXQ_BPDU(q) | \ 76 MVNETA_TX_UNSET_ERR_SUM | \ 77 MVNETA_RX_CSUM_WITH_PSEUDO_HDR) 78 #define MVNETA_PORT_CONFIG_EXTEND 0x2404 79 #define MVNETA_MAC_ADDR_LOW 0x2414 80 #define MVNETA_MAC_ADDR_HIGH 0x2418 81 #define MVNETA_SDMA_CONFIG 0x241c 82 #define MVNETA_SDMA_BRST_SIZE_16 4 83 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 84 #define MVNETA_RX_NO_DATA_SWAP BIT(4) 85 #define MVNETA_TX_NO_DATA_SWAP BIT(5) 86 #define MVNETA_DESC_SWAP BIT(6) 87 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 88 #define MVNETA_PORT_STATUS 0x2444 89 #define MVNETA_TX_IN_PRGRS BIT(1) 90 #define MVNETA_TX_FIFO_EMPTY BIT(8) 91 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c 92 #define MVNETA_SERDES_CFG 
0x24A0 93 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 94 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 95 #define MVNETA_TYPE_PRIO 0x24bc 96 #define MVNETA_FORCE_UNI BIT(21) 97 #define MVNETA_TXQ_CMD_1 0x24e4 98 #define MVNETA_TXQ_CMD 0x2448 99 #define MVNETA_TXQ_DISABLE_SHIFT 8 100 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff 101 #define MVNETA_ACC_MODE 0x2500 102 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 103 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff 104 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 105 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) 106 107 /* Exception Interrupt Port/Queue Cause register */ 108 109 #define MVNETA_INTR_NEW_CAUSE 0x25a0 110 #define MVNETA_INTR_NEW_MASK 0x25a4 111 112 /* bits 0..7 = TXQ SENT, one bit per queue. 113 * bits 8..15 = RXQ OCCUP, one bit per queue. 114 * bits 16..23 = RXQ FREE, one bit per queue. 115 * bit 29 = OLD_REG_SUM, see old reg ? 116 * bit 30 = TX_ERR_SUM, one bit for 4 ports 117 * bit 31 = MISC_SUM, one bit for 4 ports 118 */ 119 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) 120 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) 121 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 122 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) 123 124 #define MVNETA_INTR_OLD_CAUSE 0x25a8 125 #define MVNETA_INTR_OLD_MASK 0x25ac 126 127 /* Data Path Port/Queue Cause Register */ 128 #define MVNETA_INTR_MISC_CAUSE 0x25b0 129 #define MVNETA_INTR_MISC_MASK 0x25b4 130 131 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) 132 #define MVNETA_CAUSE_LINK_CHANGE BIT(1) 133 #define MVNETA_CAUSE_PTP BIT(4) 134 135 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) 136 #define MVNETA_CAUSE_RX_OVERRUN BIT(8) 137 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) 138 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) 139 #define MVNETA_CAUSE_TX_UNDERUN BIT(11) 140 #define MVNETA_CAUSE_PRBS_ERR BIT(12) 141 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) 142 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) 143 144 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 145 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) 146 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) 147 148 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 149 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) 150 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) 151 152 #define MVNETA_INTR_ENABLE 0x25b8 153 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 154 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF 155 156 #define MVNETA_RXQ_CMD 0x2680 157 #define MVNETA_RXQ_DISABLE_SHIFT 8 158 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff 159 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) 160 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) 161 #define MVNETA_GMAC_CTRL_0 0x2c00 162 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 163 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 164 #define MVNETA_GMAC0_PORT_ENABLE BIT(0) 165 #define MVNETA_GMAC_CTRL_2 0x2c08 166 #define MVNETA_GMAC2_PCS_ENABLE BIT(3) 167 #define MVNETA_GMAC2_PORT_RGMII BIT(4) 168 #define MVNETA_GMAC2_PORT_RESET BIT(6) 169 #define MVNETA_GMAC_STATUS 0x2c10 170 #define MVNETA_GMAC_LINK_UP BIT(0) 171 #define MVNETA_GMAC_SPEED_1000 BIT(1) 172 #define MVNETA_GMAC_SPEED_100 BIT(2) 173 #define MVNETA_GMAC_FULL_DUPLEX BIT(3) 174 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) 175 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) 176 #define 
MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_MIB_COUNTERS_BASE	0x3080
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT		64

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes by the hardware on
 * the RX side. Because these two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips the two bytes on its own.
 */
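/* Editor's note (illustrative only, not part of the original driver): the
 * alignment property described above follows from simple arithmetic. The
 * IP header starts at
 *
 *	MVNETA_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes
 *
 * into the received data, and 16 is a multiple of 4, so no extra
 * NET_IP_ALIGN-style padding is needed. A hypothetical build-time check,
 * placed inside any function, could assert this using the macro defined
 * just below:
 *
 *	BUILD_BUG_ON((MVNETA_MH_SIZE + ETH_HLEN) % 4 != 0);
 */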
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
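/* Editor's note (illustrative only): a worked example of the two macros
 * above for the common MTU of 1500 bytes:
 *
 *	MVNETA_RX_PKT_SIZE(1500)
 *		= ALIGN(1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS), 32)
 *		= ALIGN(1524, 32) = 1536
 *
 *	MVNETA_RX_BUF_SIZE(1536) = 1536 + NET_SKB_PAD
 *
 * NET_SKB_PAD is typically 64 on these SoCs, giving 1600 bytes per RX
 * buffer; the 32-byte alignment matches MVNETA_CPU_D_CACHE_LINE_SIZE.
 */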
struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_port {
	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Napi weight */
	int weight;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_pcpu_stats *stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_rx_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct
mvneta_tx_queue *txq) 470 { 471 txq->txq_get_index++; 472 if (txq->txq_get_index == txq->size) 473 txq->txq_get_index = 0; 474 } 475 476 /* Increment txq put counter */ 477 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) 478 { 479 txq->txq_put_index++; 480 if (txq->txq_put_index == txq->size) 481 txq->txq_put_index = 0; 482 } 483 484 485 /* Clear all MIB counters */ 486 static void mvneta_mib_counters_clear(struct mvneta_port *pp) 487 { 488 int i; 489 u32 dummy; 490 491 /* Perform dummy reads from MIB counters */ 492 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) 493 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); 494 } 495 496 /* Get System Network Statistics */ 497 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev, 498 struct rtnl_link_stats64 *stats) 499 { 500 struct mvneta_port *pp = netdev_priv(dev); 501 unsigned int start; 502 int cpu; 503 504 for_each_possible_cpu(cpu) { 505 struct mvneta_pcpu_stats *cpu_stats; 506 u64 rx_packets; 507 u64 rx_bytes; 508 u64 tx_packets; 509 u64 tx_bytes; 510 511 cpu_stats = per_cpu_ptr(pp->stats, cpu); 512 do { 513 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 514 rx_packets = cpu_stats->rx_packets; 515 rx_bytes = cpu_stats->rx_bytes; 516 tx_packets = cpu_stats->tx_packets; 517 tx_bytes = cpu_stats->tx_bytes; 518 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 519 520 stats->rx_packets += rx_packets; 521 stats->rx_bytes += rx_bytes; 522 stats->tx_packets += tx_packets; 523 stats->tx_bytes += tx_bytes; 524 } 525 526 stats->rx_errors = dev->stats.rx_errors; 527 stats->rx_dropped = dev->stats.rx_dropped; 528 529 stats->tx_dropped = dev->stats.tx_dropped; 530 531 return stats; 532 } 533 534 /* Rx descriptors helper methods */ 535 536 /* Checks whether the RX descriptor having this status is both the first 537 * and the last descriptor for the RX packet. Each RX packet is currently 538 * received through a single RX descriptor, so not having each RX 539 * descriptor with its first and last bits set is an error 540 */ 541 static int mvneta_rxq_desc_is_first_last(u32 status) 542 { 543 return (status & MVNETA_RXD_FIRST_LAST_DESC) == 544 MVNETA_RXD_FIRST_LAST_DESC; 545 } 546 547 /* Add number of descriptors ready to receive new packets */ 548 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, 549 struct mvneta_rx_queue *rxq, 550 int ndescs) 551 { 552 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can 553 * be added at once 554 */ 555 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { 556 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 557 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << 558 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 559 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; 560 } 561 562 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 563 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 564 } 565 566 /* Get number of RX descriptors occupied by received packets */ 567 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, 568 struct mvneta_rx_queue *rxq) 569 { 570 u32 val; 571 572 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); 573 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; 574 } 575 576 /* Update num of rx desc called upon return from rx path or 577 * from mvneta_rxq_drop_pkts(). 
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
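/* Editor's note (illustrative only): both "next descriptor" helpers above
 * walk their ring with MVNETA_QUEUE_NEXT_DESC(), which wraps back to 0
 * once last_desc is reached. For a ring of 128 descriptors
 * (last_desc == 127):
 *
 *	MVNETA_QUEUE_NEXT_DESC(q, 126) == 127
 *	MVNETA_QUEUE_NEXT_DESC(q, 127) == 0
 *
 * mvneta_txq_desc_put() below is the matching "step back", used when a DMA
 * mapping fails after a descriptor has already been taken from the ring.
 */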
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXQs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
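/* Editor's illustration (hypothetical helper, not part of the original
 * driver): mvneta_port_up() above enables queues by writing a bitmap with
 * one bit per initialized queue (0xff when all eight TX queues have
 * descriptor rings). mvneta_port_down() below requests the disable of the
 * same queues by writing that bitmap shifted into the disable half of the
 * register, i.e. 0xff << MVNETA_TXQ_DISABLE_SHIFT == 0xff00.
 */
static inline void mvneta_example_txq_disable_all(struct mvneta_port *pp)
{
	/* Bitmap of currently enabled TX queues, one bit per queue */
	u32 enabled = mvreg_read(pp, MVNETA_TXQ_CMD) & MVNETA_TXQ_ENABLE_MASK;

	if (enabled)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    enabled << MVNETA_TXQ_DISABLE_SHIFT);
}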
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
901 */ 902 static void mvneta_defaults_set(struct mvneta_port *pp) 903 { 904 int cpu; 905 int queue; 906 u32 val; 907 908 /* Clear all Cause registers */ 909 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 910 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 911 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 912 913 /* Mask all interrupts */ 914 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 915 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 916 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 917 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 918 919 /* Enable MBUS Retry bit16 */ 920 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); 921 922 /* Set CPU queue access map - all CPUs have access to all RX 923 * queues and to all TX queues 924 */ 925 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) 926 mvreg_write(pp, MVNETA_CPU_MAP(cpu), 927 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | 928 MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); 929 930 /* Reset RX and TX DMAs */ 931 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 932 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 933 934 /* Disable Legacy WRR, Disable EJP, Release from reset */ 935 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); 936 for (queue = 0; queue < txq_number; queue++) { 937 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); 938 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); 939 } 940 941 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 942 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 943 944 /* Set Port Acceleration Mode */ 945 val = MVNETA_ACC_MODE_EXT; 946 mvreg_write(pp, MVNETA_ACC_MODE, val); 947 948 /* Update val of portCfg register accordingly with all RxQueue types */ 949 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); 950 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 951 952 val = 0; 953 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); 954 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); 955 956 /* Build PORT_SDMA_CONFIG_REG */ 957 val = 0; 958 959 /* Default burst size */ 960 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 961 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); 962 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; 963 964 #if defined(__BIG_ENDIAN) 965 val |= MVNETA_DESC_SWAP; 966 #endif 967 968 /* Assign port SDMA configuration */ 969 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); 970 971 /* Disable PHY polling in hardware, since we're using the 972 * kernel phylib to do this. 
973 */ 974 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); 975 val &= ~MVNETA_PHY_POLLING_ENABLE; 976 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); 977 978 mvneta_set_ucast_table(pp, -1); 979 mvneta_set_special_mcast_table(pp, -1); 980 mvneta_set_other_mcast_table(pp, -1); 981 982 /* Set port interrupt enable register - default enable all */ 983 mvreg_write(pp, MVNETA_INTR_ENABLE, 984 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK 985 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); 986 } 987 988 /* Set max sizes for tx queues */ 989 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) 990 991 { 992 u32 val, size, mtu; 993 int queue; 994 995 mtu = max_tx_size * 8; 996 if (mtu > MVNETA_TX_MTU_MAX) 997 mtu = MVNETA_TX_MTU_MAX; 998 999 /* Set MTU */ 1000 val = mvreg_read(pp, MVNETA_TX_MTU); 1001 val &= ~MVNETA_TX_MTU_MAX; 1002 val |= mtu; 1003 mvreg_write(pp, MVNETA_TX_MTU, val); 1004 1005 /* TX token size and all TXQs token size must be larger that MTU */ 1006 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); 1007 1008 size = val & MVNETA_TX_TOKEN_SIZE_MAX; 1009 if (size < mtu) { 1010 size = mtu; 1011 val &= ~MVNETA_TX_TOKEN_SIZE_MAX; 1012 val |= size; 1013 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); 1014 } 1015 for (queue = 0; queue < txq_number; queue++) { 1016 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); 1017 1018 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; 1019 if (size < mtu) { 1020 size = mtu; 1021 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; 1022 val |= size; 1023 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); 1024 } 1025 } 1026 } 1027 1028 /* Set unicast address */ 1029 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, 1030 int queue) 1031 { 1032 unsigned int unicast_reg; 1033 unsigned int tbl_offset; 1034 unsigned int reg_offset; 1035 1036 /* Locate the Unicast table entry */ 1037 last_nibble = (0xf & last_nibble); 1038 1039 /* offset from unicast tbl base */ 1040 tbl_offset = (last_nibble / 4) * 4; 1041 1042 /* offset within the above reg */ 1043 reg_offset = last_nibble % 4; 1044 1045 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); 1046 1047 if (queue == -1) { 1048 /* Clear accepts frame bit at specified unicast DA tbl entry */ 1049 unicast_reg &= ~(0xff << (8 * reg_offset)); 1050 } else { 1051 unicast_reg &= ~(0xff << (8 * reg_offset)); 1052 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1053 } 1054 1055 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); 1056 } 1057 1058 /* Set mac address */ 1059 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, 1060 int queue) 1061 { 1062 unsigned int mac_h; 1063 unsigned int mac_l; 1064 1065 if (queue != -1) { 1066 mac_l = (addr[4] << 8) | (addr[5]); 1067 mac_h = (addr[0] << 24) | (addr[1] << 16) | 1068 (addr[2] << 8) | (addr[3] << 0); 1069 1070 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); 1071 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); 1072 } 1073 1074 /* Accept frames of this address */ 1075 mvneta_set_ucast_addr(pp, addr[5], queue); 1076 } 1077 1078 /* Set the number of packets that will be received before RX interrupt 1079 * will be generated by HW. 1080 */ 1081 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, 1082 struct mvneta_rx_queue *rxq, u32 value) 1083 { 1084 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), 1085 value | MVNETA_RXQ_NON_OCCUPIED(0)); 1086 rxq->pkts_coal = value; 1087 } 1088 1089 /* Set the time delay in usec before RX interrupt will be generated by 1090 * HW. 
1091 */ 1092 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, 1093 struct mvneta_rx_queue *rxq, u32 value) 1094 { 1095 u32 val; 1096 unsigned long clk_rate; 1097 1098 clk_rate = clk_get_rate(pp->clk); 1099 val = (clk_rate / 1000000) * value; 1100 1101 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); 1102 rxq->time_coal = value; 1103 } 1104 1105 /* Set threshold for TX_DONE pkts coalescing */ 1106 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, 1107 struct mvneta_tx_queue *txq, u32 value) 1108 { 1109 u32 val; 1110 1111 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); 1112 1113 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; 1114 val |= MVNETA_TXQ_SENT_THRESH_MASK(value); 1115 1116 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); 1117 1118 txq->done_pkts_coal = value; 1119 } 1120 1121 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ 1122 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, 1123 u32 phys_addr, u32 cookie) 1124 { 1125 rx_desc->buf_cookie = cookie; 1126 rx_desc->buf_phys_addr = phys_addr; 1127 } 1128 1129 /* Decrement sent descriptors counter */ 1130 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, 1131 struct mvneta_tx_queue *txq, 1132 int sent_desc) 1133 { 1134 u32 val; 1135 1136 /* Only 255 TX descriptors can be updated at once */ 1137 while (sent_desc > 0xff) { 1138 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; 1139 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1140 sent_desc = sent_desc - 0xff; 1141 } 1142 1143 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; 1144 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1145 } 1146 1147 /* Get number of TX descriptors already sent by HW */ 1148 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, 1149 struct mvneta_tx_queue *txq) 1150 { 1151 u32 val; 1152 int sent_desc; 1153 1154 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); 1155 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> 1156 MVNETA_TXQ_SENT_DESC_SHIFT; 1157 1158 return sent_desc; 1159 } 1160 1161 /* Get number of sent descriptors and decrement counter. 1162 * The number of sent descriptors is returned. 
1163 */ 1164 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, 1165 struct mvneta_tx_queue *txq) 1166 { 1167 int sent_desc; 1168 1169 /* Get number of sent descriptors */ 1170 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); 1171 1172 /* Decrement sent descriptors counter */ 1173 if (sent_desc) 1174 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); 1175 1176 return sent_desc; 1177 } 1178 1179 /* Set TXQ descriptors fields relevant for CSUM calculation */ 1180 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, 1181 int ip_hdr_len, int l4_proto) 1182 { 1183 u32 command; 1184 1185 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 1186 * G_L4_chk, L4_type; required only for checksum 1187 * calculation 1188 */ 1189 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1190 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1191 1192 if (l3_proto == swab16(ETH_P_IP)) 1193 command |= MVNETA_TXD_IP_CSUM; 1194 else 1195 command |= MVNETA_TX_L3_IP6; 1196 1197 if (l4_proto == IPPROTO_TCP) 1198 command |= MVNETA_TX_L4_CSUM_FULL; 1199 else if (l4_proto == IPPROTO_UDP) 1200 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; 1201 else 1202 command |= MVNETA_TX_L4_CSUM_NOT; 1203 1204 return command; 1205 } 1206 1207 1208 /* Display more error info */ 1209 static void mvneta_rx_error(struct mvneta_port *pp, 1210 struct mvneta_rx_desc *rx_desc) 1211 { 1212 u32 status = rx_desc->status; 1213 1214 if (!mvneta_rxq_desc_is_first_last(status)) { 1215 netdev_err(pp->dev, 1216 "bad rx status %08x (buffer oversize), size=%d\n", 1217 status, rx_desc->data_size); 1218 return; 1219 } 1220 1221 switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1222 case MVNETA_RXD_ERR_CRC: 1223 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", 1224 status, rx_desc->data_size); 1225 break; 1226 case MVNETA_RXD_ERR_OVERRUN: 1227 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", 1228 status, rx_desc->data_size); 1229 break; 1230 case MVNETA_RXD_ERR_LEN: 1231 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", 1232 status, rx_desc->data_size); 1233 break; 1234 case MVNETA_RXD_ERR_RESOURCE: 1235 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", 1236 status, rx_desc->data_size); 1237 break; 1238 } 1239 } 1240 1241 /* Handle RX checksum offload based on the descriptor's status */ 1242 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, 1243 struct sk_buff *skb) 1244 { 1245 if ((status & MVNETA_RXD_L3_IP4) && 1246 (status & MVNETA_RXD_L4_CSUM_OK)) { 1247 skb->csum = 0; 1248 skb->ip_summed = CHECKSUM_UNNECESSARY; 1249 return; 1250 } 1251 1252 skb->ip_summed = CHECKSUM_NONE; 1253 } 1254 1255 /* Return tx queue pointer (find last set bit) according to <cause> returned 1256 * form tx_done reg. <cause> must not be null. The return value is always a 1257 * valid queue for matching the first one found in <cause>. 
1258 */ 1259 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1260 u32 cause) 1261 { 1262 int queue = fls(cause) - 1; 1263 1264 return &pp->txqs[queue]; 1265 } 1266 1267 /* Free tx queue skbuffs */ 1268 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1269 struct mvneta_tx_queue *txq, int num) 1270 { 1271 int i; 1272 1273 for (i = 0; i < num; i++) { 1274 struct mvneta_tx_desc *tx_desc = txq->descs + 1275 txq->txq_get_index; 1276 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; 1277 1278 mvneta_txq_inc_get(txq); 1279 1280 if (!skb) 1281 continue; 1282 1283 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, 1284 tx_desc->data_size, DMA_TO_DEVICE); 1285 dev_kfree_skb_any(skb); 1286 } 1287 } 1288 1289 /* Handle end of transmission */ 1290 static void mvneta_txq_done(struct mvneta_port *pp, 1291 struct mvneta_tx_queue *txq) 1292 { 1293 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1294 int tx_done; 1295 1296 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1297 if (!tx_done) 1298 return; 1299 1300 mvneta_txq_bufs_free(pp, txq, tx_done); 1301 1302 txq->count -= tx_done; 1303 1304 if (netif_tx_queue_stopped(nq)) { 1305 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1) 1306 netif_tx_wake_queue(nq); 1307 } 1308 } 1309 1310 static void *mvneta_frag_alloc(const struct mvneta_port *pp) 1311 { 1312 if (likely(pp->frag_size <= PAGE_SIZE)) 1313 return netdev_alloc_frag(pp->frag_size); 1314 else 1315 return kmalloc(pp->frag_size, GFP_ATOMIC); 1316 } 1317 1318 static void mvneta_frag_free(const struct mvneta_port *pp, void *data) 1319 { 1320 if (likely(pp->frag_size <= PAGE_SIZE)) 1321 put_page(virt_to_head_page(data)); 1322 else 1323 kfree(data); 1324 } 1325 1326 /* Refill processing */ 1327 static int mvneta_rx_refill(struct mvneta_port *pp, 1328 struct mvneta_rx_desc *rx_desc) 1329 1330 { 1331 dma_addr_t phys_addr; 1332 void *data; 1333 1334 data = mvneta_frag_alloc(pp); 1335 if (!data) 1336 return -ENOMEM; 1337 1338 phys_addr = dma_map_single(pp->dev->dev.parent, data, 1339 MVNETA_RX_BUF_SIZE(pp->pkt_size), 1340 DMA_FROM_DEVICE); 1341 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { 1342 mvneta_frag_free(pp, data); 1343 return -ENOMEM; 1344 } 1345 1346 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data); 1347 return 0; 1348 } 1349 1350 /* Handle tx checksum */ 1351 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) 1352 { 1353 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1354 int ip_hdr_len = 0; 1355 u8 l4_proto; 1356 1357 if (skb->protocol == htons(ETH_P_IP)) { 1358 struct iphdr *ip4h = ip_hdr(skb); 1359 1360 /* Calculate IPv4 checksum and L4 checksum */ 1361 ip_hdr_len = ip4h->ihl; 1362 l4_proto = ip4h->protocol; 1363 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1364 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1365 1366 /* Read l4_protocol from one of IPv6 extra headers */ 1367 if (skb_network_header_len(skb) > 0) 1368 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1369 l4_proto = ip6h->nexthdr; 1370 } else 1371 return MVNETA_TX_L4_CSUM_NOT; 1372 1373 return mvneta_txq_desc_csum(skb_network_offset(skb), 1374 skb->protocol, ip_hdr_len, l4_proto); 1375 } 1376 1377 return MVNETA_TX_L4_CSUM_NOT; 1378 } 1379 1380 /* Returns rx queue pointer (find last set bit) according to causeRxTx 1381 * value 1382 */ 1383 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp, 1384 u32 cause) 1385 { 1386 int queue = fls(cause >> 8) - 1; 1387 1388 return (queue < 0 || queue >= rxq_number) ? 
NULL : &pp->rxqs[queue]; 1389 } 1390 1391 /* Drop packets received by the RXQ and free buffers */ 1392 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1393 struct mvneta_rx_queue *rxq) 1394 { 1395 int rx_done, i; 1396 1397 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1398 for (i = 0; i < rxq->size; i++) { 1399 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 1400 void *data = (void *)rx_desc->buf_cookie; 1401 1402 mvneta_frag_free(pp, data); 1403 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1404 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1405 } 1406 1407 if (rx_done) 1408 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1409 } 1410 1411 /* Main rx processing */ 1412 static int mvneta_rx(struct mvneta_port *pp, int rx_todo, 1413 struct mvneta_rx_queue *rxq) 1414 { 1415 struct net_device *dev = pp->dev; 1416 int rx_done, rx_filled; 1417 u32 rcvd_pkts = 0; 1418 u32 rcvd_bytes = 0; 1419 1420 /* Get number of received packets */ 1421 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1422 1423 if (rx_todo > rx_done) 1424 rx_todo = rx_done; 1425 1426 rx_done = 0; 1427 rx_filled = 0; 1428 1429 /* Fairness NAPI loop */ 1430 while (rx_done < rx_todo) { 1431 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1432 struct sk_buff *skb; 1433 unsigned char *data; 1434 u32 rx_status; 1435 int rx_bytes, err; 1436 1437 rx_done++; 1438 rx_filled++; 1439 rx_status = rx_desc->status; 1440 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1441 data = (unsigned char *)rx_desc->buf_cookie; 1442 1443 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1444 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1445 err_drop_frame: 1446 dev->stats.rx_errors++; 1447 mvneta_rx_error(pp, rx_desc); 1448 /* leave the descriptor untouched */ 1449 continue; 1450 } 1451 1452 if (rx_bytes <= rx_copybreak) { 1453 /* better copy a small frame and not unmap the DMA region */ 1454 skb = netdev_alloc_skb_ip_align(dev, rx_bytes); 1455 if (unlikely(!skb)) 1456 goto err_drop_frame; 1457 1458 dma_sync_single_range_for_cpu(dev->dev.parent, 1459 rx_desc->buf_phys_addr, 1460 MVNETA_MH_SIZE + NET_SKB_PAD, 1461 rx_bytes, 1462 DMA_FROM_DEVICE); 1463 memcpy(skb_put(skb, rx_bytes), 1464 data + MVNETA_MH_SIZE + NET_SKB_PAD, 1465 rx_bytes); 1466 1467 skb->protocol = eth_type_trans(skb, dev); 1468 mvneta_rx_csum(pp, rx_status, skb); 1469 napi_gro_receive(&pp->napi, skb); 1470 1471 rcvd_pkts++; 1472 rcvd_bytes += rx_bytes; 1473 1474 /* leave the descriptor and buffer untouched */ 1475 continue; 1476 } 1477 1478 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 
0 : pp->frag_size); 1479 if (!skb) 1480 goto err_drop_frame; 1481 1482 dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, 1483 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1484 1485 rcvd_pkts++; 1486 rcvd_bytes += rx_bytes; 1487 1488 /* Linux processing */ 1489 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); 1490 skb_put(skb, rx_bytes); 1491 1492 skb->protocol = eth_type_trans(skb, dev); 1493 1494 mvneta_rx_csum(pp, rx_status, skb); 1495 1496 napi_gro_receive(&pp->napi, skb); 1497 1498 /* Refill processing */ 1499 err = mvneta_rx_refill(pp, rx_desc); 1500 if (err) { 1501 netdev_err(dev, "Linux processing - Can't refill\n"); 1502 rxq->missed++; 1503 rx_filled--; 1504 } 1505 } 1506 1507 if (rcvd_pkts) { 1508 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1509 1510 u64_stats_update_begin(&stats->syncp); 1511 stats->rx_packets += rcvd_pkts; 1512 stats->rx_bytes += rcvd_bytes; 1513 u64_stats_update_end(&stats->syncp); 1514 } 1515 1516 /* Update rxq management counters */ 1517 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1518 1519 return rx_done; 1520 } 1521 1522 /* Handle tx fragmentation processing */ 1523 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 1524 struct mvneta_tx_queue *txq) 1525 { 1526 struct mvneta_tx_desc *tx_desc; 1527 int i; 1528 1529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1530 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1531 void *addr = page_address(frag->page.p) + frag->page_offset; 1532 1533 tx_desc = mvneta_txq_next_desc_get(txq); 1534 tx_desc->data_size = frag->size; 1535 1536 tx_desc->buf_phys_addr = 1537 dma_map_single(pp->dev->dev.parent, addr, 1538 tx_desc->data_size, DMA_TO_DEVICE); 1539 1540 if (dma_mapping_error(pp->dev->dev.parent, 1541 tx_desc->buf_phys_addr)) { 1542 mvneta_txq_desc_put(txq); 1543 goto error; 1544 } 1545 1546 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 1547 /* Last descriptor */ 1548 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 1549 1550 txq->tx_skb[txq->txq_put_index] = skb; 1551 1552 mvneta_txq_inc_put(txq); 1553 } else { 1554 /* Descriptor in the middle: Not First, Not Last */ 1555 tx_desc->command = 0; 1556 1557 txq->tx_skb[txq->txq_put_index] = NULL; 1558 mvneta_txq_inc_put(txq); 1559 } 1560 } 1561 1562 return 0; 1563 1564 error: 1565 /* Release all descriptors that were used to map fragments of 1566 * this packet, as well as the corresponding DMA mappings 1567 */ 1568 for (i = i - 1; i >= 0; i--) { 1569 tx_desc = txq->descs + i; 1570 dma_unmap_single(pp->dev->dev.parent, 1571 tx_desc->buf_phys_addr, 1572 tx_desc->data_size, 1573 DMA_TO_DEVICE); 1574 mvneta_txq_desc_put(txq); 1575 } 1576 1577 return -ENOMEM; 1578 } 1579 1580 /* Main tx processing */ 1581 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) 1582 { 1583 struct mvneta_port *pp = netdev_priv(dev); 1584 u16 txq_id = skb_get_queue_mapping(skb); 1585 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 1586 struct mvneta_tx_desc *tx_desc; 1587 struct netdev_queue *nq; 1588 int frags = 0; 1589 u32 tx_cmd; 1590 1591 if (!netif_running(dev)) 1592 goto out; 1593 1594 frags = skb_shinfo(skb)->nr_frags + 1; 1595 nq = netdev_get_tx_queue(dev, txq_id); 1596 1597 /* Get a descriptor for the first part of the packet */ 1598 tx_desc = mvneta_txq_next_desc_get(txq); 1599 1600 tx_cmd = mvneta_skb_tx_csum(pp, skb); 1601 1602 tx_desc->data_size = skb_headlen(skb); 1603 1604 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 1605 tx_desc->data_size, 1606 DMA_TO_DEVICE); 1607 if 
(unlikely(dma_mapping_error(dev->dev.parent, 1608 tx_desc->buf_phys_addr))) { 1609 mvneta_txq_desc_put(txq); 1610 frags = 0; 1611 goto out; 1612 } 1613 1614 if (frags == 1) { 1615 /* First and Last descriptor */ 1616 tx_cmd |= MVNETA_TXD_FLZ_DESC; 1617 tx_desc->command = tx_cmd; 1618 txq->tx_skb[txq->txq_put_index] = skb; 1619 mvneta_txq_inc_put(txq); 1620 } else { 1621 /* First but not Last */ 1622 tx_cmd |= MVNETA_TXD_F_DESC; 1623 txq->tx_skb[txq->txq_put_index] = NULL; 1624 mvneta_txq_inc_put(txq); 1625 tx_desc->command = tx_cmd; 1626 /* Continue with other skb fragments */ 1627 if (mvneta_tx_frag_process(pp, skb, txq)) { 1628 dma_unmap_single(dev->dev.parent, 1629 tx_desc->buf_phys_addr, 1630 tx_desc->data_size, 1631 DMA_TO_DEVICE); 1632 mvneta_txq_desc_put(txq); 1633 frags = 0; 1634 goto out; 1635 } 1636 } 1637 1638 txq->count += frags; 1639 mvneta_txq_pend_desc_add(pp, txq, frags); 1640 1641 if (txq->size - txq->count < MAX_SKB_FRAGS + 1) 1642 netif_tx_stop_queue(nq); 1643 1644 out: 1645 if (frags > 0) { 1646 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1647 1648 u64_stats_update_begin(&stats->syncp); 1649 stats->tx_packets++; 1650 stats->tx_bytes += skb->len; 1651 u64_stats_update_end(&stats->syncp); 1652 } else { 1653 dev->stats.tx_dropped++; 1654 dev_kfree_skb_any(skb); 1655 } 1656 1657 return NETDEV_TX_OK; 1658 } 1659 1660 1661 /* Free tx resources, when resetting a port */ 1662 static void mvneta_txq_done_force(struct mvneta_port *pp, 1663 struct mvneta_tx_queue *txq) 1664 1665 { 1666 int tx_done = txq->count; 1667 1668 mvneta_txq_bufs_free(pp, txq, tx_done); 1669 1670 /* reset txq */ 1671 txq->count = 0; 1672 txq->txq_put_index = 0; 1673 txq->txq_get_index = 0; 1674 } 1675 1676 /* Handle tx done - called in softirq context. The <cause_tx_done> argument 1677 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. 1678 */ 1679 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) 1680 { 1681 struct mvneta_tx_queue *txq; 1682 struct netdev_queue *nq; 1683 1684 while (cause_tx_done) { 1685 txq = mvneta_tx_done_policy(pp, cause_tx_done); 1686 1687 nq = netdev_get_tx_queue(pp->dev, txq->id); 1688 __netif_tx_lock(nq, smp_processor_id()); 1689 1690 if (txq->count) 1691 mvneta_txq_done(pp, txq); 1692 1693 __netif_tx_unlock(nq); 1694 cause_tx_done &= ~((1 << txq->id)); 1695 } 1696 } 1697 1698 /* Compute crc8 of the specified address, using a unique algorithm , 1699 * according to hw spec, different than generic crc8 algorithm 1700 */ 1701 static int mvneta_addr_crc(unsigned char *addr) 1702 { 1703 int crc = 0; 1704 int i; 1705 1706 for (i = 0; i < ETH_ALEN; i++) { 1707 int j; 1708 1709 crc = (crc ^ addr[i]) << 8; 1710 for (j = 7; j >= 0; j--) { 1711 if (crc & (0x100 << j)) 1712 crc ^= 0x107 << j; 1713 } 1714 } 1715 1716 return crc; 1717 } 1718 1719 /* This method controls the net device special MAC multicast support. 1720 * The Special Multicast Table for MAC addresses supports MAC of the form 1721 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 1722 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 1723 * Table entries in the DA-Filter table. This method set the Special 1724 * Multicast Table appropriate entry. 
1725 */ 1726 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 1727 unsigned char last_byte, 1728 int queue) 1729 { 1730 unsigned int smc_table_reg; 1731 unsigned int tbl_offset; 1732 unsigned int reg_offset; 1733 1734 /* Register offset from SMC table base */ 1735 tbl_offset = (last_byte / 4); 1736 /* Entry offset within the above reg */ 1737 reg_offset = last_byte % 4; 1738 1739 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 1740 + tbl_offset * 4)); 1741 1742 if (queue == -1) 1743 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1744 else { 1745 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1746 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1747 } 1748 1749 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 1750 smc_table_reg); 1751 } 1752 1753 /* This method controls the network device Other MAC multicast support. 1754 * The Other Multicast Table is used for multicast of another type. 1755 * A CRC-8 is used as an index to the Other Multicast Table entries 1756 * in the DA-Filter table. 1757 * The method gets the CRC-8 value from the calling routine and 1758 * sets the Other Multicast Table appropriate entry according to the 1759 * specified CRC-8 . 1760 */ 1761 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, 1762 unsigned char crc8, 1763 int queue) 1764 { 1765 unsigned int omc_table_reg; 1766 unsigned int tbl_offset; 1767 unsigned int reg_offset; 1768 1769 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ 1770 reg_offset = crc8 % 4; /* Entry offset within the above reg */ 1771 1772 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); 1773 1774 if (queue == -1) { 1775 /* Clear accepts frame bit at specified Other DA table entry */ 1776 omc_table_reg &= ~(0xff << (8 * reg_offset)); 1777 } else { 1778 omc_table_reg &= ~(0xff << (8 * reg_offset)); 1779 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1780 } 1781 1782 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); 1783 } 1784 1785 /* The network device supports multicast using two tables: 1786 * 1) Special Multicast Table for MAC addresses of the form 1787 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 1788 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 1789 * Table entries in the DA-Filter table. 1790 * 2) Other Multicast Table for multicast of another type. A CRC-8 value 1791 * is used as an index to the Other Multicast Table entries in the 1792 * DA-Filter table. 
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}
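/* Editor's illustration (hypothetical helper, not part of the original
 * driver): the mask value that mvneta_poll() below re-arms once it has not
 * exhausted its budget. With the default rxq_number = txq_number = 8:
 *
 *	MVNETA_TX_INTR_MASK(8) = ((1 << 8) - 1) << 0 = 0x00ff
 *	MVNETA_RX_INTR_MASK(8) = ((1 << 8) - 1) << 8 = 0xff00
 *
 * so 0xffff is written to MVNETA_INTR_NEW_MASK, while mvneta_isr() above
 * masks everything again by writing 0 before scheduling NAPI.
 */
static inline u32 mvneta_example_rxtx_intr_mask(void)
{
	/* 0xffff with the default 8 RX and 8 TX queues */
	return MVNETA_RX_INTR_MASK(rxq_number) |
	       MVNETA_TX_INTR_MASK(txq_number);
}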

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		(MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;
			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* Clear the RX bit of this queue in the
				 * cause register, so that the next
				 * iteration will find the next RX queue
				 * that has packets pending.
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;
	return rx_done;
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skbs in the hal tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
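
/* Illustration only, not part of the driver: a minimal sketch of decoding
 * the causeRxTx bit layout described in the NAPI handler comment above,
 * where bits 0-7 flag TX-done work per TXQ and bits 8-15 flag received
 * packets per RXQ.  The helper name is hypothetical.
 */
#if 0
static void example_decode_cause(u32 cause_rx_tx)
{
	int q;

	for (q = 0; q < 8; q++) {
		if (cause_rx_tx & BIT(q))
			pr_info("TXQ %d has transmitted packets\n", q);
		if (cause_rx_tx & BIT(8 + q))
			pr_info("RXQ %d has received packets\n", q);
	}
}
#endif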

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}

/* Free all resources allocated for a TX queue */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp,
		    MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

/* Return a positive (possibly adjusted) MTU value, or -EINVAL if invalid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
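
/* Illustration only, not part of the driver: expected behaviour of
 * mvneta_check_mtu_valid() above for a few sample inputs.  The exact
 * adjusted values depend on MVNETA_RX_PKT_SIZE(), which is defined earlier
 * in this file, so the comments below are only indicative.
 */
#if 0
static void example_mtu_checks(struct net_device *dev)
{
	int mtu;

	mtu = mvneta_check_mtu_valid(dev, 60);	  /* < 68: returns -EINVAL */
	mtu = mvneta_check_mtu_valid(dev, 1500);  /* typical MTU: returned as-is
						   * unless the alignment check
						   * adjusts it */
	mtu = mvneta_check_mtu_valid(dev, 10000); /* clamped to 9676, then possibly
						   * adjusted by the alignment check */
}
#endif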

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the RXQs
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	mvneta_setup_txqs(pp);

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	return 0;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 *mac = addr + 2;
	int i;

	if (netif_running(dev))
		return -EBUSY;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, mac, rxq_def);

	/* Set addr in the device */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = mac[i];

	return 0;
}

static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}
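
/* Illustration only, not part of the driver: the register packing implied by
 * mvneta_get_mac_addr() above, shown in the opposite direction.  The real
 * write path is mvneta_mac_addr_set(), defined earlier in this file; the
 * helper below is a hypothetical sketch of the same layout, in which
 * MAC_ADDR_HIGH holds bytes 0-3 and MAC_ADDR_LOW holds bytes 4-5.
 */
#if 0
static void example_pack_mac(const unsigned char *addr,
			     u32 *mac_h, u32 *mac_l)
{
	*mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	*mac_l = (addr[4] << 8) | addr[5];
}
#endif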

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link = 0;
	pp->duplex = 0;
	pp->speed = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* By default the link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!pp->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
	if (!ret)
		mvneta_adjust_link(dev);

	return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtool */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtool */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
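
/* Illustration only, not part of the driver: a user-space sketch of how the
 * ethtool handlers around here are reached through the SIOCETHTOOL ioctl
 * (the same path the ethtool(8) utility uses).  The interface name "eth0"
 * is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_get_ringparam(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ring;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx ring %u/%u, tx ring %u/%u\n",
		       ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);

	close(fd);
	return 0;
}
#endif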
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
		ring->tx_pending : MVNETA_MAX_TXD;

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link       = ethtool_op_get_link,
	.get_settings   = mvneta_ethtool_get_settings,
	.set_settings   = mvneta_ethtool_set_settings,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam  = mvneta_ethtool_set_ringparam,
};

/* Initialize hw */
static int mvneta_init(struct mvneta_port *pp, int phy_addr)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

static void mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}

/* platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
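
/* Illustration only, not part of the driver: the per-window register
 * encoding used by mvneta_conf_mbus_windows() above.  The base register
 * combines the window base address (upper 16 bits) with the mbus attribute
 * and target ID, and the size register stores (size - 1) truncated to
 * 64 KiB granularity.  The values below are hypothetical examples.
 */
#if 0
static void example_win_encoding(void)
{
	u32 base = 0x00000000;		/* example DRAM CS base */
	u32 size = 0x40000000;		/* example 1 GiB window */
	u8 attr = 0x0e, target = 0x0;	/* example mbus attribute/target */

	u32 win_base = (base & 0xffff0000) | (attr << 8) | target;
	u32 win_size = (size - 1) & 0xffff0000;

	(void)win_base;
	(void)win_size;
}
#endif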

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);

	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(pp->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_clk;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
"random"; 2857 eth_hw_addr_random(dev); 2858 } 2859 } 2860 2861 pp->tx_ring_size = MVNETA_MAX_TXD; 2862 pp->rx_ring_size = MVNETA_MAX_RXD; 2863 2864 pp->dev = dev; 2865 SET_NETDEV_DEV(dev, &pdev->dev); 2866 2867 err = mvneta_init(pp, phy_addr); 2868 if (err < 0) { 2869 dev_err(&pdev->dev, "can't init eth hal\n"); 2870 goto err_free_stats; 2871 } 2872 2873 err = mvneta_port_power_up(pp, phy_mode); 2874 if (err < 0) { 2875 dev_err(&pdev->dev, "can't power up port\n"); 2876 goto err_deinit; 2877 } 2878 2879 dram_target_info = mv_mbus_dram_info(); 2880 if (dram_target_info) 2881 mvneta_conf_mbus_windows(pp, dram_target_info); 2882 2883 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 2884 2885 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2886 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2887 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2888 dev->priv_flags |= IFF_UNICAST_FLT; 2889 2890 err = register_netdev(dev); 2891 if (err < 0) { 2892 dev_err(&pdev->dev, "failed to register\n"); 2893 goto err_deinit; 2894 } 2895 2896 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 2897 dev->dev_addr); 2898 2899 platform_set_drvdata(pdev, pp->dev); 2900 2901 return 0; 2902 2903 err_deinit: 2904 mvneta_deinit(pp); 2905 err_free_stats: 2906 free_percpu(pp->stats); 2907 err_clk: 2908 clk_disable_unprepare(pp->clk); 2909 err_free_irq: 2910 irq_dispose_mapping(dev->irq); 2911 err_free_netdev: 2912 free_netdev(dev); 2913 return err; 2914 } 2915 2916 /* Device removal routine */ 2917 static int mvneta_remove(struct platform_device *pdev) 2918 { 2919 struct net_device *dev = platform_get_drvdata(pdev); 2920 struct mvneta_port *pp = netdev_priv(dev); 2921 2922 unregister_netdev(dev); 2923 mvneta_deinit(pp); 2924 clk_disable_unprepare(pp->clk); 2925 free_percpu(pp->stats); 2926 irq_dispose_mapping(dev->irq); 2927 free_netdev(dev); 2928 2929 return 0; 2930 } 2931 2932 static const struct of_device_id mvneta_match[] = { 2933 { .compatible = "marvell,armada-370-neta" }, 2934 { } 2935 }; 2936 MODULE_DEVICE_TABLE(of, mvneta_match); 2937 2938 static struct platform_driver mvneta_driver = { 2939 .probe = mvneta_probe, 2940 .remove = mvneta_remove, 2941 .driver = { 2942 .name = MVNETA_DRIVER_NAME, 2943 .of_match_table = mvneta_match, 2944 }, 2945 }; 2946 2947 module_platform_driver(mvneta_driver); 2948 2949 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 2950 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 2951 MODULE_LICENSE("GPL"); 2952 2953 module_param(rxq_number, int, S_IRUGO); 2954 module_param(txq_number, int, S_IRUGO); 2955 2956 module_param(rxq_def, int, S_IRUGO); 2957 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); 2958