1 /* 2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Rami Rosen <rosenr@marvell.com> 7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 8 * 9 * This file is licensed under the terms of the GNU General Public 10 * License version 2. This program is licensed "as is" without any 11 * warranty of any kind, whether express or implied. 12 */ 13 14 #include <linux/kernel.h> 15 #include <linux/netdevice.h> 16 #include <linux/etherdevice.h> 17 #include <linux/platform_device.h> 18 #include <linux/skbuff.h> 19 #include <linux/inetdevice.h> 20 #include <linux/mbus.h> 21 #include <linux/module.h> 22 #include <linux/interrupt.h> 23 #include <net/ip.h> 24 #include <net/ipv6.h> 25 #include <linux/of.h> 26 #include <linux/of_irq.h> 27 #include <linux/of_mdio.h> 28 #include <linux/of_net.h> 29 #include <linux/of_address.h> 30 #include <linux/phy.h> 31 #include <linux/clk.h> 32 33 /* Registers */ 34 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) 35 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) 36 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) 37 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) 38 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) 39 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) 40 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) 41 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) 42 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 43 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) 44 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) 45 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff 46 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) 47 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 48 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 49 #define MVNETA_PORT_RX_RESET 0x1cc0 50 #define MVNETA_PORT_RX_DMA_RESET BIT(0) 51 #define MVNETA_PHY_ADDR 0x2000 52 #define MVNETA_PHY_ADDR_MASK 0x1f 53 #define MVNETA_MBUS_RETRY 0x2010 54 #define MVNETA_UNIT_INTR_CAUSE 0x2080 55 #define MVNETA_UNIT_CONTROL 0x20B0 56 #define MVNETA_PHY_POLLING_ENABLE BIT(1) 57 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) 58 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) 59 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) 60 #define MVNETA_BASE_ADDR_ENABLE 0x2290 61 #define MVNETA_PORT_CONFIG 0x2400 62 #define MVNETA_UNI_PROMISC_MODE BIT(0) 63 #define MVNETA_DEF_RXQ(q) ((q) << 1) 64 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) 65 #define MVNETA_TX_UNSET_ERR_SUM BIT(12) 66 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) 67 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) 68 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) 69 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) 70 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ 71 MVNETA_DEF_RXQ_ARP(q) | \ 72 MVNETA_DEF_RXQ_TCP(q) | \ 73 MVNETA_DEF_RXQ_UDP(q) | \ 74 MVNETA_DEF_RXQ_BPDU(q) | \ 75 MVNETA_TX_UNSET_ERR_SUM | \ 76 MVNETA_RX_CSUM_WITH_PSEUDO_HDR) 77 #define MVNETA_PORT_CONFIG_EXTEND 0x2404 78 #define MVNETA_MAC_ADDR_LOW 0x2414 79 #define MVNETA_MAC_ADDR_HIGH 0x2418 80 #define MVNETA_SDMA_CONFIG 0x241c 81 #define MVNETA_SDMA_BRST_SIZE_16 4 82 #define MVNETA_NO_DESC_SWAP 0x0 83 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) 84 #define MVNETA_RX_NO_DATA_SWAP BIT(4) 85 #define MVNETA_TX_NO_DATA_SWAP BIT(5) 86 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) 87 #define MVNETA_PORT_STATUS 0x2444 88 #define MVNETA_TX_IN_PRGRS BIT(1) 89 #define MVNETA_TX_FIFO_EMPTY BIT(8) 90 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c 91 #define MVNETA_TYPE_PRIO 0x24bc 92 #define 
MVNETA_FORCE_UNI BIT(21) 93 #define MVNETA_TXQ_CMD_1 0x24e4 94 #define MVNETA_TXQ_CMD 0x2448 95 #define MVNETA_TXQ_DISABLE_SHIFT 8 96 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff 97 #define MVNETA_ACC_MODE 0x2500 98 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 99 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff 100 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 101 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) 102 #define MVNETA_INTR_NEW_CAUSE 0x25a0 103 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 104 #define MVNETA_INTR_NEW_MASK 0x25a4 105 #define MVNETA_INTR_OLD_CAUSE 0x25a8 106 #define MVNETA_INTR_OLD_MASK 0x25ac 107 #define MVNETA_INTR_MISC_CAUSE 0x25b0 108 #define MVNETA_INTR_MISC_MASK 0x25b4 109 #define MVNETA_INTR_ENABLE 0x25b8 110 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 111 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 112 #define MVNETA_RXQ_CMD 0x2680 113 #define MVNETA_RXQ_DISABLE_SHIFT 8 114 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff 115 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) 116 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) 117 #define MVNETA_GMAC_CTRL_0 0x2c00 118 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 119 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 120 #define MVNETA_GMAC0_PORT_ENABLE BIT(0) 121 #define MVNETA_GMAC_CTRL_2 0x2c08 122 #define MVNETA_GMAC2_PSC_ENABLE BIT(3) 123 #define MVNETA_GMAC2_PORT_RGMII BIT(4) 124 #define MVNETA_GMAC2_PORT_RESET BIT(6) 125 #define MVNETA_GMAC_STATUS 0x2c10 126 #define MVNETA_GMAC_LINK_UP BIT(0) 127 #define MVNETA_GMAC_SPEED_1000 BIT(1) 128 #define MVNETA_GMAC_SPEED_100 BIT(2) 129 #define MVNETA_GMAC_FULL_DUPLEX BIT(3) 130 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) 131 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) 132 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) 133 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) 134 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c 135 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) 136 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 137 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 138 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 139 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 140 #define MVNETA_MIB_COUNTERS_BASE 0x3080 141 #define MVNETA_MIB_LATE_COLLISION 0x7c 142 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 143 #define MVNETA_DA_FILT_OTH_MCAST 0x3500 144 #define MVNETA_DA_FILT_UCAST_BASE 0x3600 145 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) 146 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) 147 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 148 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) 149 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) 150 #define MVNETA_TXQ_DEC_SENT_SHIFT 16 151 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) 152 #define MVNETA_TXQ_SENT_DESC_SHIFT 16 153 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 154 #define MVNETA_PORT_TX_RESET 0x3cf0 155 #define MVNETA_PORT_TX_DMA_RESET BIT(0) 156 #define MVNETA_TX_MTU 0x3e0c 157 #define MVNETA_TX_TOKEN_SIZE 0x3e14 158 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff 159 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) 160 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff 161 162 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 163 164 /* Descriptor ring Macros */ 165 #define MVNETA_QUEUE_NEXT_DESC(q, index) \ 166 (((index) < (q)->last_desc) ? 
((index) + 1) : 0) 167 168 /* Various constants */ 169 170 /* Coalescing */ 171 #define MVNETA_TXDONE_COAL_PKTS 16 172 #define MVNETA_RX_COAL_PKTS 32 173 #define MVNETA_RX_COAL_USEC 100 174 175 /* Timer */ 176 #define MVNETA_TX_DONE_TIMER_PERIOD 10 177 178 /* Napi polling weight */ 179 #define MVNETA_RX_POLL_WEIGHT 64 180 181 /* The two bytes Marvell header. Either contains a special value used 182 * by Marvell switches when a specific hardware mode is enabled (not 183 * supported by this driver) or is filled automatically by zeroes on 184 * the RX side. Those two bytes being at the front of the Ethernet 185 * header, they allow to have the IP header aligned on a 4 bytes 186 * boundary automatically: the hardware skips those two bytes on its 187 * own. 188 */ 189 #define MVNETA_MH_SIZE 2 190 191 #define MVNETA_VLAN_TAG_LEN 4 192 193 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 194 #define MVNETA_TX_CSUM_MAX_SIZE 9800 195 #define MVNETA_ACC_MODE_EXT 1 196 197 /* Timeout constants */ 198 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 199 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 200 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 201 202 #define MVNETA_TX_MTU_MAX 0x3ffff 203 204 /* Max number of Rx descriptors */ 205 #define MVNETA_MAX_RXD 128 206 207 /* Max number of Tx descriptors */ 208 #define MVNETA_MAX_TXD 532 209 210 /* descriptor aligned size */ 211 #define MVNETA_DESC_ALIGNED_SIZE 32 212 213 #define MVNETA_RX_PKT_SIZE(mtu) \ 214 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ 215 ETH_HLEN + ETH_FCS_LEN, \ 216 MVNETA_CPU_D_CACHE_LINE_SIZE) 217 218 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 219 220 struct mvneta_stats { 221 struct u64_stats_sync syncp; 222 u64 packets; 223 u64 bytes; 224 }; 225 226 struct mvneta_port { 227 int pkt_size; 228 void __iomem *base; 229 struct mvneta_rx_queue *rxqs; 230 struct mvneta_tx_queue *txqs; 231 struct timer_list tx_done_timer; 232 struct net_device *dev; 233 234 u32 cause_rx_tx; 235 struct napi_struct napi; 236 237 /* Flags */ 238 unsigned long flags; 239 #define MVNETA_F_TX_DONE_TIMER_BIT 0 240 241 /* Napi weight */ 242 int weight; 243 244 /* Core clock */ 245 struct clk *clk; 246 u8 mcast_count[256]; 247 u16 tx_ring_size; 248 u16 rx_ring_size; 249 struct mvneta_stats tx_stats; 250 struct mvneta_stats rx_stats; 251 252 struct mii_bus *mii_bus; 253 struct phy_device *phy_dev; 254 phy_interface_t phy_interface; 255 struct device_node *phy_node; 256 unsigned int link; 257 unsigned int duplex; 258 unsigned int speed; 259 }; 260 261 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the 262 * layout of the transmit and reception DMA descriptors, and their 263 * layout is therefore defined by the hardware design 264 */ 265 struct mvneta_tx_desc { 266 u32 command; /* Options used by HW for packet transmitting.*/ 267 #define MVNETA_TX_L3_OFF_SHIFT 0 268 #define MVNETA_TX_IP_HLEN_SHIFT 8 269 #define MVNETA_TX_L4_UDP BIT(16) 270 #define MVNETA_TX_L3_IP6 BIT(17) 271 #define MVNETA_TXD_IP_CSUM BIT(18) 272 #define MVNETA_TXD_Z_PAD BIT(19) 273 #define MVNETA_TXD_L_DESC BIT(20) 274 #define MVNETA_TXD_F_DESC BIT(21) 275 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ 276 MVNETA_TXD_L_DESC | \ 277 MVNETA_TXD_F_DESC) 278 #define MVNETA_TX_L4_CSUM_FULL BIT(30) 279 #define MVNETA_TX_L4_CSUM_NOT BIT(31) 280 281 u16 reserverd1; /* csum_l4 (for future use) */ 282 u16 data_size; /* Data size of transmitted packet in bytes */ 283 u32 buf_phys_addr; /* Physical addr of transmitted buffer */ 284 u32 reserved2; /* hw_cmd - (for future use, PMT) 
*/ 285 u32 reserved3[4]; /* Reserved - (for future use) */ 286 }; 287 288 struct mvneta_rx_desc { 289 u32 status; /* Info about received packet */ 290 #define MVNETA_RXD_ERR_CRC 0x0 291 #define MVNETA_RXD_ERR_SUMMARY BIT(16) 292 #define MVNETA_RXD_ERR_OVERRUN BIT(17) 293 #define MVNETA_RXD_ERR_LEN BIT(18) 294 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) 295 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) 296 #define MVNETA_RXD_L3_IP4 BIT(25) 297 #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) 298 #define MVNETA_RXD_L4_CSUM_OK BIT(30) 299 300 u16 reserved1; /* pnc_info - (for future use, PnC) */ 301 u16 data_size; /* Size of received packet in bytes */ 302 u32 buf_phys_addr; /* Physical address of the buffer */ 303 u32 reserved2; /* pnc_flow_id (for future use, PnC) */ 304 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 305 u16 reserved3; /* prefetch_cmd, for future use */ 306 u16 reserved4; /* csum_l4 - (for future use, PnC) */ 307 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ 308 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ 309 }; 310 311 struct mvneta_tx_queue { 312 /* Number of this TX queue, in the range 0-7 */ 313 u8 id; 314 315 /* Number of TX DMA descriptors in the descriptor ring */ 316 int size; 317 318 /* Number of currently used TX DMA descriptor in the 319 * descriptor ring 320 */ 321 int count; 322 323 /* Array of transmitted skb */ 324 struct sk_buff **tx_skb; 325 326 /* Index of last TX DMA descriptor that was inserted */ 327 int txq_put_index; 328 329 /* Index of the TX DMA descriptor to be cleaned up */ 330 int txq_get_index; 331 332 u32 done_pkts_coal; 333 334 /* Virtual address of the TX DMA descriptors array */ 335 struct mvneta_tx_desc *descs; 336 337 /* DMA address of the TX DMA descriptors array */ 338 dma_addr_t descs_phys; 339 340 /* Index of the last TX DMA descriptor */ 341 int last_desc; 342 343 /* Index of the next TX DMA descriptor to process */ 344 int next_desc_to_proc; 345 }; 346 347 struct mvneta_rx_queue { 348 /* rx queue number, in the range 0-7 */ 349 u8 id; 350 351 /* num of rx descriptors in the rx descriptor ring */ 352 int size; 353 354 /* counter of times when mvneta_refill() failed */ 355 int missed; 356 357 u32 pkts_coal; 358 u32 time_coal; 359 360 /* Virtual address of the RX DMA descriptors array */ 361 struct mvneta_rx_desc *descs; 362 363 /* DMA address of the RX DMA descriptors array */ 364 dma_addr_t descs_phys; 365 366 /* Index of the last RX DMA descriptor */ 367 int last_desc; 368 369 /* Index of the next RX DMA descriptor to process */ 370 int next_desc_to_proc; 371 }; 372 373 static int rxq_number = 8; 374 static int txq_number = 8; 375 376 static int rxq_def; 377 378 #define MVNETA_DRIVER_NAME "mvneta" 379 #define MVNETA_DRIVER_VERSION "1.0" 380 381 /* Utility/helper methods */ 382 383 /* Write helper method */ 384 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) 385 { 386 writel(data, pp->base + offset); 387 } 388 389 /* Read helper method */ 390 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) 391 { 392 return readl(pp->base + offset); 393 } 394 395 /* Increment txq get counter */ 396 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) 397 { 398 txq->txq_get_index++; 399 if (txq->txq_get_index == txq->size) 400 txq->txq_get_index = 0; 401 } 402 403 /* Increment txq put counter */ 404 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) 405 { 406 txq->txq_put_index++; 407 if (txq->txq_put_index == txq->size) 408 txq->txq_put_index 
= 0; 409 } 410 411 412 /* Clear all MIB counters */ 413 static void mvneta_mib_counters_clear(struct mvneta_port *pp) 414 { 415 int i; 416 u32 dummy; 417 418 /* Perform dummy reads from MIB counters */ 419 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) 420 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); 421 } 422 423 /* Get System Network Statistics */ 424 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev, 425 struct rtnl_link_stats64 *stats) 426 { 427 struct mvneta_port *pp = netdev_priv(dev); 428 unsigned int start; 429 430 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 431 432 do { 433 start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp); 434 stats->rx_packets = pp->rx_stats.packets; 435 stats->rx_bytes = pp->rx_stats.bytes; 436 } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start)); 437 438 439 do { 440 start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp); 441 stats->tx_packets = pp->tx_stats.packets; 442 stats->tx_bytes = pp->tx_stats.bytes; 443 } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start)); 444 445 stats->rx_errors = dev->stats.rx_errors; 446 stats->rx_dropped = dev->stats.rx_dropped; 447 448 stats->tx_dropped = dev->stats.tx_dropped; 449 450 return stats; 451 } 452 453 /* Rx descriptors helper methods */ 454 455 /* Checks whether the given RX descriptor is both the first and the 456 * last descriptor for the RX packet. Each RX packet is currently 457 * received through a single RX descriptor, so not having each RX 458 * descriptor with its first and last bits set is an error 459 */ 460 static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc) 461 { 462 return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) == 463 MVNETA_RXD_FIRST_LAST_DESC; 464 } 465 466 /* Add number of descriptors ready to receive new packets */ 467 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, 468 struct mvneta_rx_queue *rxq, 469 int ndescs) 470 { 471 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can 472 * be added at once 473 */ 474 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { 475 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 476 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << 477 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 478 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; 479 } 480 481 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), 482 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); 483 } 484 485 /* Get number of RX descriptors occupied by received packets */ 486 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, 487 struct mvneta_rx_queue *rxq) 488 { 489 u32 val; 490 491 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); 492 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; 493 } 494 495 /* Update num of rx desc called upon return from rx path or 496 * from mvneta_rxq_drop_pkts(). 
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                        (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is passed in bytes; the register field is programmed
         * in units of 8 bytes, hence the shift by 3.
         */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once; assume the
         * caller processes TX descriptors in quanta smaller than 256.
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}
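/* Note on the ring walk above: both mvneta_rxq_next_desc_get() and
 * mvneta_txq_next_desc_get() hand out descs[next_desc_to_proc] and
 * advance the index with MVNETA_QUEUE_NEXT_DESC(), which wraps after
 * last_desc. For example, with a 128-entry RX ring (MVNETA_MAX_RXD),
 * last_desc is 127 and the index sequence runs 126 -> 127 -> 0.
 */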
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

        if (enable)
                val |= MVNETA_GMAC2_PORT_RGMII;
        else
                val &= ~MVNETA_GMAC2_PORT_RGMII;

        mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
        val |= MVNETA_GMAC2_PSC_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        mvneta_mib_counters_clear(pp);
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        q_map = 0;
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
                if (rxq->descs != NULL)
                        q_map |= (1 << queue);
        }

        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
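/* Note on mvneta_port_up() above: bit N in the low byte of
 * MVNETA_TXQ_CMD / MVNETA_RXQ_CMD enables queue N, so only queues
 * whose descriptor arrays were allocated get their bit set. With all
 * eight queues initialized (txq_number == rxq_number == 8), q_map is
 * 0xff, i.e. MVNETA_TXQ_ENABLE_MASK / MVNETA_RXQ_ENABLE_MASK.
 */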
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
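/* Note on the three filter-table helpers above: each 32-bit DA-filter
 * register packs four one-byte entries, and each entry byte is
 * (0x1 | (queue << 1)), i.e. bit 0 = "pass the frame" and bits 3:1 =
 * destination RX queue. For example, queue 3 yields the byte 0x07 and
 * the register value 0x07070707; queue == -1 writes 0 everywhere,
 * rejecting all matching frames.
 */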
/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;

        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

        /* Mask all interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set CPU queue access map - all CPUs have access to all RX
         * queues and to all TX queues
         */
        for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
                mvreg_write(pp, MVNETA_CPU_MAP(cpu),
                            (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
                             MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        val = MVNETA_ACC_MODE_EXT;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register according to all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);

        val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
                MVNETA_NO_DESC_SWAP);

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* The TX token size and all TXQ token sizes must be larger than the MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp,
MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); 960 } 961 } 962 } 963 964 /* Set unicast address */ 965 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, 966 int queue) 967 { 968 unsigned int unicast_reg; 969 unsigned int tbl_offset; 970 unsigned int reg_offset; 971 972 /* Locate the Unicast table entry */ 973 last_nibble = (0xf & last_nibble); 974 975 /* offset from unicast tbl base */ 976 tbl_offset = (last_nibble / 4) * 4; 977 978 /* offset within the above reg */ 979 reg_offset = last_nibble % 4; 980 981 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); 982 983 if (queue == -1) { 984 /* Clear accepts frame bit at specified unicast DA tbl entry */ 985 unicast_reg &= ~(0xff << (8 * reg_offset)); 986 } else { 987 unicast_reg &= ~(0xff << (8 * reg_offset)); 988 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 989 } 990 991 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); 992 } 993 994 /* Set mac address */ 995 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, 996 int queue) 997 { 998 unsigned int mac_h; 999 unsigned int mac_l; 1000 1001 if (queue != -1) { 1002 mac_l = (addr[4] << 8) | (addr[5]); 1003 mac_h = (addr[0] << 24) | (addr[1] << 16) | 1004 (addr[2] << 8) | (addr[3] << 0); 1005 1006 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); 1007 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); 1008 } 1009 1010 /* Accept frames of this address */ 1011 mvneta_set_ucast_addr(pp, addr[5], queue); 1012 } 1013 1014 /* Set the number of packets that will be received before RX interrupt 1015 * will be generated by HW. 1016 */ 1017 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, 1018 struct mvneta_rx_queue *rxq, u32 value) 1019 { 1020 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), 1021 value | MVNETA_RXQ_NON_OCCUPIED(0)); 1022 rxq->pkts_coal = value; 1023 } 1024 1025 /* Set the time delay in usec before RX interrupt will be generated by 1026 * HW. 
1027 */ 1028 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, 1029 struct mvneta_rx_queue *rxq, u32 value) 1030 { 1031 u32 val; 1032 unsigned long clk_rate; 1033 1034 clk_rate = clk_get_rate(pp->clk); 1035 val = (clk_rate / 1000000) * value; 1036 1037 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); 1038 rxq->time_coal = value; 1039 } 1040 1041 /* Set threshold for TX_DONE pkts coalescing */ 1042 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, 1043 struct mvneta_tx_queue *txq, u32 value) 1044 { 1045 u32 val; 1046 1047 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); 1048 1049 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; 1050 val |= MVNETA_TXQ_SENT_THRESH_MASK(value); 1051 1052 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); 1053 1054 txq->done_pkts_coal = value; 1055 } 1056 1057 /* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */ 1058 static void mvneta_add_tx_done_timer(struct mvneta_port *pp) 1059 { 1060 if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) { 1061 pp->tx_done_timer.expires = jiffies + 1062 msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD); 1063 add_timer(&pp->tx_done_timer); 1064 } 1065 } 1066 1067 1068 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ 1069 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, 1070 u32 phys_addr, u32 cookie) 1071 { 1072 rx_desc->buf_cookie = cookie; 1073 rx_desc->buf_phys_addr = phys_addr; 1074 } 1075 1076 /* Decrement sent descriptors counter */ 1077 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, 1078 struct mvneta_tx_queue *txq, 1079 int sent_desc) 1080 { 1081 u32 val; 1082 1083 /* Only 255 TX descriptors can be updated at once */ 1084 while (sent_desc > 0xff) { 1085 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; 1086 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1087 sent_desc = sent_desc - 0xff; 1088 } 1089 1090 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; 1091 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); 1092 } 1093 1094 /* Get number of TX descriptors already sent by HW */ 1095 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, 1096 struct mvneta_tx_queue *txq) 1097 { 1098 u32 val; 1099 int sent_desc; 1100 1101 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); 1102 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> 1103 MVNETA_TXQ_SENT_DESC_SHIFT; 1104 1105 return sent_desc; 1106 } 1107 1108 /* Get number of sent descriptors and decrement counter. 1109 * The number of sent descriptors is returned. 
1110 */ 1111 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, 1112 struct mvneta_tx_queue *txq) 1113 { 1114 int sent_desc; 1115 1116 /* Get number of sent descriptors */ 1117 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); 1118 1119 /* Decrement sent descriptors counter */ 1120 if (sent_desc) 1121 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); 1122 1123 return sent_desc; 1124 } 1125 1126 /* Set TXQ descriptors fields relevant for CSUM calculation */ 1127 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, 1128 int ip_hdr_len, int l4_proto) 1129 { 1130 u32 command; 1131 1132 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 1133 * G_L4_chk, L4_type; required only for checksum 1134 * calculation 1135 */ 1136 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1137 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1138 1139 if (l3_proto == swab16(ETH_P_IP)) 1140 command |= MVNETA_TXD_IP_CSUM; 1141 else 1142 command |= MVNETA_TX_L3_IP6; 1143 1144 if (l4_proto == IPPROTO_TCP) 1145 command |= MVNETA_TX_L4_CSUM_FULL; 1146 else if (l4_proto == IPPROTO_UDP) 1147 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; 1148 else 1149 command |= MVNETA_TX_L4_CSUM_NOT; 1150 1151 return command; 1152 } 1153 1154 1155 /* Display more error info */ 1156 static void mvneta_rx_error(struct mvneta_port *pp, 1157 struct mvneta_rx_desc *rx_desc) 1158 { 1159 u32 status = rx_desc->status; 1160 1161 if (!mvneta_rxq_desc_is_first_last(rx_desc)) { 1162 netdev_err(pp->dev, 1163 "bad rx status %08x (buffer oversize), size=%d\n", 1164 rx_desc->status, rx_desc->data_size); 1165 return; 1166 } 1167 1168 switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1169 case MVNETA_RXD_ERR_CRC: 1170 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", 1171 status, rx_desc->data_size); 1172 break; 1173 case MVNETA_RXD_ERR_OVERRUN: 1174 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", 1175 status, rx_desc->data_size); 1176 break; 1177 case MVNETA_RXD_ERR_LEN: 1178 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", 1179 status, rx_desc->data_size); 1180 break; 1181 case MVNETA_RXD_ERR_RESOURCE: 1182 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", 1183 status, rx_desc->data_size); 1184 break; 1185 } 1186 } 1187 1188 /* Handle RX checksum offload */ 1189 static void mvneta_rx_csum(struct mvneta_port *pp, 1190 struct mvneta_rx_desc *rx_desc, 1191 struct sk_buff *skb) 1192 { 1193 if ((rx_desc->status & MVNETA_RXD_L3_IP4) && 1194 (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) { 1195 skb->csum = 0; 1196 skb->ip_summed = CHECKSUM_UNNECESSARY; 1197 return; 1198 } 1199 1200 skb->ip_summed = CHECKSUM_NONE; 1201 } 1202 1203 /* Return tx queue pointer (find last set bit) according to causeTxDone reg */ 1204 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, 1205 u32 cause) 1206 { 1207 int queue = fls(cause) - 1; 1208 1209 return (queue < 0 || queue >= txq_number) ? 
NULL : &pp->txqs[queue]; 1210 } 1211 1212 /* Free tx queue skbuffs */ 1213 static void mvneta_txq_bufs_free(struct mvneta_port *pp, 1214 struct mvneta_tx_queue *txq, int num) 1215 { 1216 int i; 1217 1218 for (i = 0; i < num; i++) { 1219 struct mvneta_tx_desc *tx_desc = txq->descs + 1220 txq->txq_get_index; 1221 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; 1222 1223 mvneta_txq_inc_get(txq); 1224 1225 if (!skb) 1226 continue; 1227 1228 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, 1229 tx_desc->data_size, DMA_TO_DEVICE); 1230 dev_kfree_skb_any(skb); 1231 } 1232 } 1233 1234 /* Handle end of transmission */ 1235 static int mvneta_txq_done(struct mvneta_port *pp, 1236 struct mvneta_tx_queue *txq) 1237 { 1238 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); 1239 int tx_done; 1240 1241 tx_done = mvneta_txq_sent_desc_proc(pp, txq); 1242 if (tx_done == 0) 1243 return tx_done; 1244 mvneta_txq_bufs_free(pp, txq, tx_done); 1245 1246 txq->count -= tx_done; 1247 1248 if (netif_tx_queue_stopped(nq)) { 1249 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1) 1250 netif_tx_wake_queue(nq); 1251 } 1252 1253 return tx_done; 1254 } 1255 1256 /* Refill processing */ 1257 static int mvneta_rx_refill(struct mvneta_port *pp, 1258 struct mvneta_rx_desc *rx_desc) 1259 1260 { 1261 dma_addr_t phys_addr; 1262 struct sk_buff *skb; 1263 1264 skb = netdev_alloc_skb(pp->dev, pp->pkt_size); 1265 if (!skb) 1266 return -ENOMEM; 1267 1268 phys_addr = dma_map_single(pp->dev->dev.parent, skb->head, 1269 MVNETA_RX_BUF_SIZE(pp->pkt_size), 1270 DMA_FROM_DEVICE); 1271 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { 1272 dev_kfree_skb(skb); 1273 return -ENOMEM; 1274 } 1275 1276 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb); 1277 1278 return 0; 1279 } 1280 1281 /* Handle tx checksum */ 1282 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) 1283 { 1284 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1285 int ip_hdr_len = 0; 1286 u8 l4_proto; 1287 1288 if (skb->protocol == htons(ETH_P_IP)) { 1289 struct iphdr *ip4h = ip_hdr(skb); 1290 1291 /* Calculate IPv4 checksum and L4 checksum */ 1292 ip_hdr_len = ip4h->ihl; 1293 l4_proto = ip4h->protocol; 1294 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1295 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1296 1297 /* Read l4_protocol from one of IPv6 extra headers */ 1298 if (skb_network_header_len(skb) > 0) 1299 ip_hdr_len = (skb_network_header_len(skb) >> 2); 1300 l4_proto = ip6h->nexthdr; 1301 } else 1302 return MVNETA_TX_L4_CSUM_NOT; 1303 1304 return mvneta_txq_desc_csum(skb_network_offset(skb), 1305 skb->protocol, ip_hdr_len, l4_proto); 1306 } 1307 1308 return MVNETA_TX_L4_CSUM_NOT; 1309 } 1310 1311 /* Returns rx queue pointer (find last set bit) according to causeRxTx 1312 * value 1313 */ 1314 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp, 1315 u32 cause) 1316 { 1317 int queue = fls(cause >> 8) - 1; 1318 1319 return (queue < 0 || queue >= rxq_number) ? 
NULL : &pp->rxqs[queue]; 1320 } 1321 1322 /* Drop packets received by the RXQ and free buffers */ 1323 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, 1324 struct mvneta_rx_queue *rxq) 1325 { 1326 int rx_done, i; 1327 1328 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1329 for (i = 0; i < rxq->size; i++) { 1330 struct mvneta_rx_desc *rx_desc = rxq->descs + i; 1331 struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie; 1332 1333 dev_kfree_skb_any(skb); 1334 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1335 rx_desc->data_size, DMA_FROM_DEVICE); 1336 } 1337 1338 if (rx_done) 1339 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1340 } 1341 1342 /* Main rx processing */ 1343 static int mvneta_rx(struct mvneta_port *pp, int rx_todo, 1344 struct mvneta_rx_queue *rxq) 1345 { 1346 struct net_device *dev = pp->dev; 1347 int rx_done, rx_filled; 1348 1349 /* Get number of received packets */ 1350 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); 1351 1352 if (rx_todo > rx_done) 1353 rx_todo = rx_done; 1354 1355 rx_done = 0; 1356 rx_filled = 0; 1357 1358 /* Fairness NAPI loop */ 1359 while (rx_done < rx_todo) { 1360 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1361 struct sk_buff *skb; 1362 u32 rx_status; 1363 int rx_bytes, err; 1364 1365 prefetch(rx_desc); 1366 rx_done++; 1367 rx_filled++; 1368 rx_status = rx_desc->status; 1369 skb = (struct sk_buff *)rx_desc->buf_cookie; 1370 1371 if (!mvneta_rxq_desc_is_first_last(rx_desc) || 1372 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1373 dev->stats.rx_errors++; 1374 mvneta_rx_error(pp, rx_desc); 1375 mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr, 1376 (u32)skb); 1377 continue; 1378 } 1379 1380 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1381 rx_desc->data_size, DMA_FROM_DEVICE); 1382 1383 rx_bytes = rx_desc->data_size - 1384 (ETH_FCS_LEN + MVNETA_MH_SIZE); 1385 u64_stats_update_begin(&pp->rx_stats.syncp); 1386 pp->rx_stats.packets++; 1387 pp->rx_stats.bytes += rx_bytes; 1388 u64_stats_update_end(&pp->rx_stats.syncp); 1389 1390 /* Linux processing */ 1391 skb_reserve(skb, MVNETA_MH_SIZE); 1392 skb_put(skb, rx_bytes); 1393 1394 skb->protocol = eth_type_trans(skb, dev); 1395 1396 mvneta_rx_csum(pp, rx_desc, skb); 1397 1398 napi_gro_receive(&pp->napi, skb); 1399 1400 /* Refill processing */ 1401 err = mvneta_rx_refill(pp, rx_desc); 1402 if (err) { 1403 netdev_err(pp->dev, "Linux processing - Can't refill\n"); 1404 rxq->missed++; 1405 rx_filled--; 1406 } 1407 } 1408 1409 /* Update rxq management counters */ 1410 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1411 1412 return rx_done; 1413 } 1414 1415 /* Handle tx fragmentation processing */ 1416 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, 1417 struct mvneta_tx_queue *txq) 1418 { 1419 struct mvneta_tx_desc *tx_desc; 1420 int i; 1421 1422 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1423 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1424 void *addr = page_address(frag->page.p) + frag->page_offset; 1425 1426 tx_desc = mvneta_txq_next_desc_get(txq); 1427 tx_desc->data_size = frag->size; 1428 1429 tx_desc->buf_phys_addr = 1430 dma_map_single(pp->dev->dev.parent, addr, 1431 tx_desc->data_size, DMA_TO_DEVICE); 1432 1433 if (dma_mapping_error(pp->dev->dev.parent, 1434 tx_desc->buf_phys_addr)) { 1435 mvneta_txq_desc_put(txq); 1436 goto error; 1437 } 1438 1439 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 1440 /* Last descriptor */ 1441 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; 
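                        /* Only the last fragment's descriptor carries
                         * the skb pointer (stored just below); middle
                         * fragments leave tx_skb[] NULL so that
                         * mvneta_txq_bufs_free() frees the skb exactly
                         * once, when this last descriptor is reclaimed.
                         */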
1442 1443 txq->tx_skb[txq->txq_put_index] = skb; 1444 1445 mvneta_txq_inc_put(txq); 1446 } else { 1447 /* Descriptor in the middle: Not First, Not Last */ 1448 tx_desc->command = 0; 1449 1450 txq->tx_skb[txq->txq_put_index] = NULL; 1451 mvneta_txq_inc_put(txq); 1452 } 1453 } 1454 1455 return 0; 1456 1457 error: 1458 /* Release all descriptors that were used to map fragments of 1459 * this packet, as well as the corresponding DMA mappings 1460 */ 1461 for (i = i - 1; i >= 0; i--) { 1462 tx_desc = txq->descs + i; 1463 dma_unmap_single(pp->dev->dev.parent, 1464 tx_desc->buf_phys_addr, 1465 tx_desc->data_size, 1466 DMA_TO_DEVICE); 1467 mvneta_txq_desc_put(txq); 1468 } 1469 1470 return -ENOMEM; 1471 } 1472 1473 /* Main tx processing */ 1474 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) 1475 { 1476 struct mvneta_port *pp = netdev_priv(dev); 1477 u16 txq_id = skb_get_queue_mapping(skb); 1478 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; 1479 struct mvneta_tx_desc *tx_desc; 1480 struct netdev_queue *nq; 1481 int frags = 0; 1482 u32 tx_cmd; 1483 1484 if (!netif_running(dev)) 1485 goto out; 1486 1487 frags = skb_shinfo(skb)->nr_frags + 1; 1488 nq = netdev_get_tx_queue(dev, txq_id); 1489 1490 /* Get a descriptor for the first part of the packet */ 1491 tx_desc = mvneta_txq_next_desc_get(txq); 1492 1493 tx_cmd = mvneta_skb_tx_csum(pp, skb); 1494 1495 tx_desc->data_size = skb_headlen(skb); 1496 1497 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, 1498 tx_desc->data_size, 1499 DMA_TO_DEVICE); 1500 if (unlikely(dma_mapping_error(dev->dev.parent, 1501 tx_desc->buf_phys_addr))) { 1502 mvneta_txq_desc_put(txq); 1503 frags = 0; 1504 goto out; 1505 } 1506 1507 if (frags == 1) { 1508 /* First and Last descriptor */ 1509 tx_cmd |= MVNETA_TXD_FLZ_DESC; 1510 tx_desc->command = tx_cmd; 1511 txq->tx_skb[txq->txq_put_index] = skb; 1512 mvneta_txq_inc_put(txq); 1513 } else { 1514 /* First but not Last */ 1515 tx_cmd |= MVNETA_TXD_F_DESC; 1516 txq->tx_skb[txq->txq_put_index] = NULL; 1517 mvneta_txq_inc_put(txq); 1518 tx_desc->command = tx_cmd; 1519 /* Continue with other skb fragments */ 1520 if (mvneta_tx_frag_process(pp, skb, txq)) { 1521 dma_unmap_single(dev->dev.parent, 1522 tx_desc->buf_phys_addr, 1523 tx_desc->data_size, 1524 DMA_TO_DEVICE); 1525 mvneta_txq_desc_put(txq); 1526 frags = 0; 1527 goto out; 1528 } 1529 } 1530 1531 txq->count += frags; 1532 mvneta_txq_pend_desc_add(pp, txq, frags); 1533 1534 if (txq->size - txq->count < MAX_SKB_FRAGS + 1) 1535 netif_tx_stop_queue(nq); 1536 1537 out: 1538 if (frags > 0) { 1539 u64_stats_update_begin(&pp->tx_stats.syncp); 1540 pp->tx_stats.packets++; 1541 pp->tx_stats.bytes += skb->len; 1542 u64_stats_update_end(&pp->tx_stats.syncp); 1543 1544 } else { 1545 dev->stats.tx_dropped++; 1546 dev_kfree_skb_any(skb); 1547 } 1548 1549 if (txq->count >= MVNETA_TXDONE_COAL_PKTS) 1550 mvneta_txq_done(pp, txq); 1551 1552 /* If after calling mvneta_txq_done, count equals 1553 * frags, we need to set the timer 1554 */ 1555 if (txq->count == frags && frags > 0) 1556 mvneta_add_tx_done_timer(pp); 1557 1558 return NETDEV_TX_OK; 1559 } 1560 1561 1562 /* Free tx resources, when resetting a port */ 1563 static void mvneta_txq_done_force(struct mvneta_port *pp, 1564 struct mvneta_tx_queue *txq) 1565 1566 { 1567 int tx_done = txq->count; 1568 1569 mvneta_txq_bufs_free(pp, txq, tx_done); 1570 1571 /* reset txq */ 1572 txq->count = 0; 1573 txq->txq_put_index = 0; 1574 txq->txq_get_index = 0; 1575 } 1576 1577 /* handle tx done - called from tx done 
timer callback */ 1578 static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done, 1579 int *tx_todo) 1580 { 1581 struct mvneta_tx_queue *txq; 1582 u32 tx_done = 0; 1583 struct netdev_queue *nq; 1584 1585 *tx_todo = 0; 1586 while (cause_tx_done != 0) { 1587 txq = mvneta_tx_done_policy(pp, cause_tx_done); 1588 if (!txq) 1589 break; 1590 1591 nq = netdev_get_tx_queue(pp->dev, txq->id); 1592 __netif_tx_lock(nq, smp_processor_id()); 1593 1594 if (txq->count) { 1595 tx_done += mvneta_txq_done(pp, txq); 1596 *tx_todo += txq->count; 1597 } 1598 1599 __netif_tx_unlock(nq); 1600 cause_tx_done &= ~((1 << txq->id)); 1601 } 1602 1603 return tx_done; 1604 } 1605 1606 /* Compute crc8 of the specified address, using a unique algorithm , 1607 * according to hw spec, different than generic crc8 algorithm 1608 */ 1609 static int mvneta_addr_crc(unsigned char *addr) 1610 { 1611 int crc = 0; 1612 int i; 1613 1614 for (i = 0; i < ETH_ALEN; i++) { 1615 int j; 1616 1617 crc = (crc ^ addr[i]) << 8; 1618 for (j = 7; j >= 0; j--) { 1619 if (crc & (0x100 << j)) 1620 crc ^= 0x107 << j; 1621 } 1622 } 1623 1624 return crc; 1625 } 1626 1627 /* This method controls the net device special MAC multicast support. 1628 * The Special Multicast Table for MAC addresses supports MAC of the form 1629 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). 1630 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast 1631 * Table entries in the DA-Filter table. This method set the Special 1632 * Multicast Table appropriate entry. 1633 */ 1634 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, 1635 unsigned char last_byte, 1636 int queue) 1637 { 1638 unsigned int smc_table_reg; 1639 unsigned int tbl_offset; 1640 unsigned int reg_offset; 1641 1642 /* Register offset from SMC table base */ 1643 tbl_offset = (last_byte / 4); 1644 /* Entry offset within the above reg */ 1645 reg_offset = last_byte % 4; 1646 1647 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST 1648 + tbl_offset * 4)); 1649 1650 if (queue == -1) 1651 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1652 else { 1653 smc_table_reg &= ~(0xff << (8 * reg_offset)); 1654 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); 1655 } 1656 1657 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, 1658 smc_table_reg); 1659 } 1660 1661 /* This method controls the network device Other MAC multicast support. 1662 * The Other Multicast Table is used for multicast of another type. 1663 * A CRC-8 is used as an index to the Other Multicast Table entries 1664 * in the DA-Filter table. 1665 * The method gets the CRC-8 value from the calling routine and 1666 * sets the Other Multicast Table appropriate entry according to the 1667 * specified CRC-8 . 
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
                                        unsigned char crc8,
                                        int queue)
{
        unsigned int omc_table_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
        reg_offset = crc8 % 4; /* Entry offset within the above reg */

        omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

        if (queue == -1) {
                /* Clear accepts frame bit at specified Other DA table entry */
                omc_table_reg &= ~(0xff << (8 * reg_offset));
        } else {
                omc_table_reg &= ~(0xff << (8 * reg_offset));
                omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
 *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *    Table entries in the DA-Filter table.
 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
                                 int queue)
{
        unsigned char crc_result = 0;

        if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
                mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
                return 0;
        }

        crc_result = mvneta_addr_crc(p_addr);
        if (queue == -1) {
                if (pp->mcast_count[crc_result] == 0) {
                        netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
                                    crc_result);
                        return -EINVAL;
                }

                pp->mcast_count[crc_result]--;
                if (pp->mcast_count[crc_result] != 0) {
                        netdev_info(pp->dev,
                                    "After delete there are %d valid Mcast for crc8=0x%02x\n",
                                    pp->mcast_count[crc_result], crc_result);
                        return -EINVAL;
                }
        } else
                pp->mcast_count[crc_result]++;

        mvneta_set_other_mcast_addr(pp, crc_result, queue);

        return 0;
}

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
                                          int is_promisc)
{
        u32 port_cfg_reg, val;

        port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

        val = mvreg_read(pp, MVNETA_TYPE_PRIO);

        /* Set / Clear UPM bit in port configuration register */
        if (is_promisc) {
                /* Accept all Unicast addresses */
                port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
                val |= MVNETA_FORCE_UNI;
                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
        } else {
                /* Reject all Unicast addresses */
                port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
                val &= ~MVNETA_FORCE_UNI;
        }

        mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
        mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
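/* Note on mvneta_rx_unicast_promisc_set() above: in promiscuous mode
 * the driver sets the UPM bit plus MVNETA_FORCE_UNI and programs the
 * MAC address registers to all-ones, so unicast filtering effectively
 * accepts every destination address; mvneta_set_rx_mode() below relies
 * on this for IFF_PROMISC.
 */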
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) {
                /* Accept all: Multicast + Unicast */
                mvneta_rx_unicast_promisc_set(pp, 1);
                mvneta_set_ucast_table(pp, rxq_def);
                mvneta_set_special_mcast_table(pp, rxq_def);
                mvneta_set_other_mcast_table(pp, rxq_def);
        } else {
                /* Accept single Unicast */
                mvneta_rx_unicast_promisc_set(pp, 0);
                mvneta_set_ucast_table(pp, -1);
                mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

                if (dev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        mvneta_set_special_mcast_table(pp, rxq_def);
                        mvneta_set_other_mcast_table(pp, rxq_def);
                } else {
                        /* Accept only initialized multicast */
                        mvneta_set_special_mcast_table(pp, -1);
                        mvneta_set_other_mcast_table(pp, -1);

                        if (!netdev_mc_empty(dev)) {
                                netdev_for_each_mc_addr(ha, dev) {
                                        mvneta_mcast_addr_set(pp, ha->addr,
                                                              rxq_def);
                                }
                        }
                }
        }
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
        struct mvneta_port *pp = (struct mvneta_port *)dev_id;

        /* Mask all interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

        napi_schedule(&pp->napi);

        return IRQ_HANDLED;
}

/* NAPI handler
 * Bits 0-7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8-15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
        int rx_done = 0;
        u32 cause_rx_tx;
        unsigned long flags;
        struct mvneta_port *pp = netdev_priv(napi->dev);

        if (!netif_running(pp->dev)) {
                napi_complete(napi);
                return rx_done;
        }

        /* Read cause register */
        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
                MVNETA_RX_INTR_MASK(rxq_number);

        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
        cause_rx_tx |= pp->cause_rx_tx;
        if (rxq_number > 1) {
                while ((cause_rx_tx != 0) && (budget > 0)) {
                        int count;
                        struct mvneta_rx_queue *rxq;
                        /* get rx queue number from cause_rx_tx */
                        rxq = mvneta_rx_policy(pp, cause_rx_tx);
                        if (!rxq)
                                break;

                        /* process the packet in that rx queue */
                        count = mvneta_rx(pp, budget, rxq);
                        rx_done += count;
                        budget -= count;
                        if (budget > 0) {
                                /* Clear this RX queue's bit in the
                                 * cause register so that the next
                                 * iteration finds the next RX queue
                                 * with pending packets
                                 */
                                cause_rx_tx &= ~((1 << rxq->id) << 8);
                        }
                }
        } else {
                rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
                budget -= rx_done;
        }

        if (budget > 0) {
                cause_rx_tx = 0;
                napi_complete(napi);
                local_irq_save(flags);
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                            MVNETA_RX_INTR_MASK(rxq_number));
                local_irq_restore(flags);
        }

        pp->cause_rx_tx = cause_rx_tx;
        return rx_done;
}

/* tx done timer callback */
static void mvneta_tx_done_timer_callback(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct mvneta_port *pp = netdev_priv(dev);
        int tx_done = 0, tx_todo = 0;

        if (!netif_running(dev))
                return;

        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

        tx_done = mvneta_tx_done_gbe(pp,
                                     (((1 << txq_number) - 1) &
                                      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
                                     &tx_todo);
        if (tx_todo > 0)
mvneta_add_tx_done_timer(pp); 1898 } 1899 1900 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ 1901 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, 1902 int num) 1903 { 1904 struct net_device *dev = pp->dev; 1905 int i; 1906 1907 for (i = 0; i < num; i++) { 1908 struct sk_buff *skb; 1909 struct mvneta_rx_desc *rx_desc; 1910 unsigned long phys_addr; 1911 1912 skb = dev_alloc_skb(pp->pkt_size); 1913 if (!skb) { 1914 netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n", 1915 __func__, rxq->id, i, num); 1916 break; 1917 } 1918 1919 rx_desc = rxq->descs + i; 1920 memset(rx_desc, 0, sizeof(struct mvneta_rx_desc)); 1921 phys_addr = dma_map_single(dev->dev.parent, skb->head, 1922 MVNETA_RX_BUF_SIZE(pp->pkt_size), 1923 DMA_FROM_DEVICE); 1924 if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) { 1925 dev_kfree_skb(skb); 1926 break; 1927 } 1928 1929 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb); 1930 } 1931 1932 /* Add this number of RX descriptors as non occupied (ready to 1933 * get packets) 1934 */ 1935 mvneta_rxq_non_occup_desc_add(pp, rxq, i); 1936 1937 return i; 1938 } 1939 1940 /* Free all packets pending transmit from all TXQs and reset TX port */ 1941 static void mvneta_tx_reset(struct mvneta_port *pp) 1942 { 1943 int queue; 1944 1945 /* free the skb's in the hal tx ring */ 1946 for (queue = 0; queue < txq_number; queue++) 1947 mvneta_txq_done_force(pp, &pp->txqs[queue]); 1948 1949 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); 1950 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); 1951 } 1952 1953 static void mvneta_rx_reset(struct mvneta_port *pp) 1954 { 1955 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); 1956 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); 1957 } 1958 1959 /* Rx/Tx queue initialization/cleanup methods */ 1960 1961 /* Create a specified RX queue */ 1962 static int mvneta_rxq_init(struct mvneta_port *pp, 1963 struct mvneta_rx_queue *rxq) 1964 1965 { 1966 rxq->size = pp->rx_ring_size; 1967 1968 /* Allocate memory for RX descriptors */ 1969 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 1970 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 1971 &rxq->descs_phys, GFP_KERNEL); 1972 if (rxq->descs == NULL) 1973 return -ENOMEM; 1974 1975 BUG_ON(rxq->descs != 1976 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 1977 1978 rxq->last_desc = rxq->size - 1; 1979 1980 /* Set Rx descriptors queue starting address */ 1981 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); 1982 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); 1983 1984 /* Set Offset */ 1985 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); 1986 1987 /* Set coalescing pkts and time */ 1988 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 1989 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 1990 1991 /* Fill RXQ with buffers from RX pool */ 1992 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); 1993 mvneta_rxq_bm_disable(pp, rxq); 1994 mvneta_rxq_fill(pp, rxq, rxq->size); 1995 1996 return 0; 1997 } 1998 1999 /* Cleanup Rx queue */ 2000 static void mvneta_rxq_deinit(struct mvneta_port *pp, 2001 struct mvneta_rx_queue *rxq) 2002 { 2003 mvneta_rxq_drop_pkts(pp, rxq); 2004 2005 if (rxq->descs) 2006 dma_free_coherent(pp->dev->dev.parent, 2007 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 2008 rxq->descs, 2009 rxq->descs_phys); 2010 2011 rxq->descs = NULL; 2012 rxq->last_desc = 0; 2013 rxq->next_desc_to_proc = 0; 2014 rxq->descs_phys = 0; 2015 } 2016 2017 /* Create and initialize a tx queue */ 2018 
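/* As with the RX rings above, the TX descriptor ring is allocated with
 * dma_alloc_coherent() and is expected to be cache-line aligned (checked
 * via BUG_ON against MVNETA_CPU_D_CACHE_LINE_SIZE below).
 */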
static int mvneta_txq_init(struct mvneta_port *pp, 2019 struct mvneta_tx_queue *txq) 2020 { 2021 txq->size = pp->tx_ring_size; 2022 2023 /* Allocate memory for TX descriptors */ 2024 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2025 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2026 &txq->descs_phys, GFP_KERNEL); 2027 if (txq->descs == NULL) 2028 return -ENOMEM; 2029 2030 /* Make sure descriptor address is cache line size aligned */ 2031 BUG_ON(txq->descs != 2032 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 2033 2034 txq->last_desc = txq->size - 1; 2035 2036 /* Set maximum bandwidth for enabled TXQs */ 2037 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); 2038 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); 2039 2040 /* Set Tx descriptors queue starting address */ 2041 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); 2042 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); 2043 2044 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); 2045 if (txq->tx_skb == NULL) { 2046 dma_free_coherent(pp->dev->dev.parent, 2047 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2048 txq->descs, txq->descs_phys); 2049 return -ENOMEM; 2050 } 2051 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2052 2053 return 0; 2054 } 2055 2056 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ 2057 static void mvneta_txq_deinit(struct mvneta_port *pp, 2058 struct mvneta_tx_queue *txq) 2059 { 2060 kfree(txq->tx_skb); 2061 2062 if (txq->descs) 2063 dma_free_coherent(pp->dev->dev.parent, 2064 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2065 txq->descs, txq->descs_phys); 2066 2067 txq->descs = NULL; 2068 txq->last_desc = 0; 2069 txq->next_desc_to_proc = 0; 2070 txq->descs_phys = 0; 2071 2072 /* Set minimum bandwidth for disabled TXQs */ 2073 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); 2074 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); 2075 2076 /* Set Tx descriptors queue starting address and size */ 2077 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); 2078 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); 2079 } 2080 2081 /* Cleanup all Tx queues */ 2082 static void mvneta_cleanup_txqs(struct mvneta_port *pp) 2083 { 2084 int queue; 2085 2086 for (queue = 0; queue < txq_number; queue++) 2087 mvneta_txq_deinit(pp, &pp->txqs[queue]); 2088 } 2089 2090 /* Cleanup all Rx queues */ 2091 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) 2092 { 2093 int queue; 2094 2095 for (queue = 0; queue < rxq_number; queue++) 2096 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); 2097 } 2098 2099 2100 /* Init all Rx queues */ 2101 static int mvneta_setup_rxqs(struct mvneta_port *pp) 2102 { 2103 int queue; 2104 2105 for (queue = 0; queue < rxq_number; queue++) { 2106 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); 2107 if (err) { 2108 netdev_err(pp->dev, "%s: can't create rxq=%d\n", 2109 __func__, queue); 2110 mvneta_cleanup_rxqs(pp); 2111 return err; 2112 } 2113 } 2114 2115 return 0; 2116 } 2117 2118 /* Init all tx queues */ 2119 static int mvneta_setup_txqs(struct mvneta_port *pp) 2120 { 2121 int queue; 2122 2123 for (queue = 0; queue < txq_number; queue++) { 2124 int err = mvneta_txq_init(pp, &pp->txqs[queue]); 2125 if (err) { 2126 netdev_err(pp->dev, "%s: can't create txq=%d\n", 2127 __func__, queue); 2128 mvneta_cleanup_txqs(pp); 2129 return err; 2130 } 2131 } 2132 2133 return 0; 2134 } 2135 2136 static void mvneta_start_dev(struct mvneta_port *pp) 2137 { 2138 mvneta_max_rx_size_set(pp, pp->pkt_size); 2139 
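	/* The TX side is configured for the same maximum packet size as RX */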
mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2140 2141 /* start the Rx/Tx activity */ 2142 mvneta_port_enable(pp); 2143 2144 /* Enable polling on the port */ 2145 napi_enable(&pp->napi); 2146 2147 /* Unmask interrupts */ 2148 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2149 MVNETA_RX_INTR_MASK(rxq_number)); 2150 2151 phy_start(pp->phy_dev); 2152 netif_tx_start_all_queues(pp->dev); 2153 } 2154 2155 static void mvneta_stop_dev(struct mvneta_port *pp) 2156 { 2157 phy_stop(pp->phy_dev); 2158 2159 napi_disable(&pp->napi); 2160 2161 netif_carrier_off(pp->dev); 2162 2163 mvneta_port_down(pp); 2164 netif_tx_stop_all_queues(pp->dev); 2165 2166 /* Stop the port activity */ 2167 mvneta_port_disable(pp); 2168 2169 /* Clear all ethernet port interrupts */ 2170 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2171 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); 2172 2173 /* Mask all ethernet port interrupts */ 2174 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2175 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2176 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); 2177 2178 mvneta_tx_reset(pp); 2179 mvneta_rx_reset(pp); 2180 } 2181 2182 /* tx timeout callback - display a message and stop/start the network device */ 2183 static void mvneta_tx_timeout(struct net_device *dev) 2184 { 2185 struct mvneta_port *pp = netdev_priv(dev); 2186 2187 netdev_info(dev, "tx timeout\n"); 2188 mvneta_stop_dev(pp); 2189 mvneta_start_dev(pp); 2190 } 2191 2192 /* Return positive if MTU is valid */ 2193 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) 2194 { 2195 if (mtu < 68) { 2196 netdev_err(dev, "cannot change mtu to less than 68\n"); 2197 return -EINVAL; 2198 } 2199 2200 /* 9676 == 9700 - 20 and rounding to 8 */ 2201 if (mtu > 9676) { 2202 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu); 2203 mtu = 9676; 2204 } 2205 2206 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { 2207 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", 2208 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); 2209 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); 2210 } 2211 2212 return mtu; 2213 } 2214 2215 /* Change the device mtu */ 2216 static int mvneta_change_mtu(struct net_device *dev, int mtu) 2217 { 2218 struct mvneta_port *pp = netdev_priv(dev); 2219 int ret; 2220 2221 mtu = mvneta_check_mtu_valid(dev, mtu); 2222 if (mtu < 0) 2223 return -EINVAL; 2224 2225 dev->mtu = mtu; 2226 2227 if (!netif_running(dev)) 2228 return 0; 2229 2230 /* The interface is running, so we have to force a 2231 * reallocation of the RXQs 2232 */ 2233 mvneta_stop_dev(pp); 2234 2235 mvneta_cleanup_txqs(pp); 2236 mvneta_cleanup_rxqs(pp); 2237 2238 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2239 2240 ret = mvneta_setup_rxqs(pp); 2241 if (ret) { 2242 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n"); 2243 return ret; 2244 } 2245 2246 mvneta_setup_txqs(pp); 2247 2248 mvneta_start_dev(pp); 2249 mvneta_port_up(pp); 2250 2251 return 0; 2252 } 2253 2254 /* Get mac address */ 2255 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) 2256 { 2257 u32 mac_addr_l, mac_addr_h; 2258 2259 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); 2260 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); 2261 addr[0] = (mac_addr_h >> 24) & 0xFF; 2262 addr[1] = (mac_addr_h >> 16) & 0xFF; 2263 addr[2] = (mac_addr_h >> 8) & 0xFF; 2264 addr[3] = mac_addr_h & 0xFF; 2265 addr[4] = (mac_addr_l >> 8) & 0xFF; 2266 addr[5] = mac_addr_l & 0xFF; 2267 } 2268 2269 /* Handle setting mac address */ 2270 static int mvneta_set_mac_addr(struct net_device *dev, void *addr) 2271 { 
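	/* 'addr' points to a struct sockaddr: skip the two-byte sa_family
	 * field so that 'mac' points at sa_data, i.e. the new MAC address.
	 */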
2272 struct mvneta_port *pp = netdev_priv(dev); 2273 u8 *mac = addr + 2; 2274 int i; 2275 2276 if (netif_running(dev)) 2277 return -EBUSY; 2278 2279 /* Remove previous address table entry */ 2280 mvneta_mac_addr_set(pp, dev->dev_addr, -1); 2281 2282 /* Set new addr in hw */ 2283 mvneta_mac_addr_set(pp, mac, rxq_def); 2284 2285 /* Set addr in the device */ 2286 for (i = 0; i < ETH_ALEN; i++) 2287 dev->dev_addr[i] = mac[i]; 2288 2289 return 0; 2290 } 2291 2292 static void mvneta_adjust_link(struct net_device *ndev) 2293 { 2294 struct mvneta_port *pp = netdev_priv(ndev); 2295 struct phy_device *phydev = pp->phy_dev; 2296 int status_change = 0; 2297 2298 if (phydev->link) { 2299 if ((pp->speed != phydev->speed) || 2300 (pp->duplex != phydev->duplex)) { 2301 u32 val; 2302 2303 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2304 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2305 MVNETA_GMAC_CONFIG_GMII_SPEED | 2306 MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2307 2308 if (phydev->duplex) 2309 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2310 2311 if (phydev->speed == SPEED_1000) 2312 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 2313 else 2314 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2315 2316 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2317 2318 pp->duplex = phydev->duplex; 2319 pp->speed = phydev->speed; 2320 } 2321 } 2322 2323 if (phydev->link != pp->link) { 2324 if (!phydev->link) { 2325 pp->duplex = -1; 2326 pp->speed = 0; 2327 } 2328 2329 pp->link = phydev->link; 2330 status_change = 1; 2331 } 2332 2333 if (status_change) { 2334 if (phydev->link) { 2335 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2336 val |= (MVNETA_GMAC_FORCE_LINK_PASS | 2337 MVNETA_GMAC_FORCE_LINK_DOWN); 2338 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2339 mvneta_port_up(pp); 2340 netdev_info(pp->dev, "link up\n"); 2341 } else { 2342 mvneta_port_down(pp); 2343 netdev_info(pp->dev, "link down\n"); 2344 } 2345 } 2346 } 2347 2348 static int mvneta_mdio_probe(struct mvneta_port *pp) 2349 { 2350 struct phy_device *phy_dev; 2351 2352 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, 2353 pp->phy_interface); 2354 if (!phy_dev) { 2355 netdev_err(pp->dev, "could not find the PHY\n"); 2356 return -ENODEV; 2357 } 2358 2359 phy_dev->supported &= PHY_GBIT_FEATURES; 2360 phy_dev->advertising = phy_dev->supported; 2361 2362 pp->phy_dev = phy_dev; 2363 pp->link = 0; 2364 pp->duplex = 0; 2365 pp->speed = 0; 2366 2367 return 0; 2368 } 2369 2370 static void mvneta_mdio_remove(struct mvneta_port *pp) 2371 { 2372 phy_disconnect(pp->phy_dev); 2373 pp->phy_dev = NULL; 2374 } 2375 2376 static int mvneta_open(struct net_device *dev) 2377 { 2378 struct mvneta_port *pp = netdev_priv(dev); 2379 int ret; 2380 2381 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); 2382 2383 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 2384 2385 ret = mvneta_setup_rxqs(pp); 2386 if (ret) 2387 return ret; 2388 2389 ret = mvneta_setup_txqs(pp); 2390 if (ret) 2391 goto err_cleanup_rxqs; 2392 2393 /* Connect to port interrupt line */ 2394 ret = request_irq(pp->dev->irq, mvneta_isr, 0, 2395 MVNETA_DRIVER_NAME, pp); 2396 if (ret) { 2397 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); 2398 goto err_cleanup_txqs; 2399 } 2400 2401 /* In default link is down */ 2402 netif_carrier_off(pp->dev); 2403 2404 ret = mvneta_mdio_probe(pp); 2405 if (ret < 0) { 2406 netdev_err(dev, "cannot probe MDIO bus\n"); 2407 goto err_free_irq; 2408 } 2409 2410 mvneta_start_dev(pp); 2411 2412 return 0; 2413 2414 err_free_irq: 2415 free_irq(pp->dev->irq, pp); 2416 
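	/* fall through and release the TX and RX queues as well */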
err_cleanup_txqs: 2417 mvneta_cleanup_txqs(pp); 2418 err_cleanup_rxqs: 2419 mvneta_cleanup_rxqs(pp); 2420 return ret; 2421 } 2422 2423 /* Stop the port, free port interrupt line */ 2424 static int mvneta_stop(struct net_device *dev) 2425 { 2426 struct mvneta_port *pp = netdev_priv(dev); 2427 2428 mvneta_stop_dev(pp); 2429 mvneta_mdio_remove(pp); 2430 free_irq(dev->irq, pp); 2431 mvneta_cleanup_rxqs(pp); 2432 mvneta_cleanup_txqs(pp); 2433 del_timer(&pp->tx_done_timer); 2434 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); 2435 2436 return 0; 2437 } 2438 2439 /* Ethtool methods */ 2440 2441 /* Get settings (phy address, speed) for ethtools */ 2442 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2443 { 2444 struct mvneta_port *pp = netdev_priv(dev); 2445 2446 if (!pp->phy_dev) 2447 return -ENODEV; 2448 2449 return phy_ethtool_gset(pp->phy_dev, cmd); 2450 } 2451 2452 /* Set settings (phy address, speed) for ethtools */ 2453 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2454 { 2455 struct mvneta_port *pp = netdev_priv(dev); 2456 2457 if (!pp->phy_dev) 2458 return -ENODEV; 2459 2460 return phy_ethtool_sset(pp->phy_dev, cmd); 2461 } 2462 2463 /* Set interrupt coalescing for ethtools */ 2464 static int mvneta_ethtool_set_coalesce(struct net_device *dev, 2465 struct ethtool_coalesce *c) 2466 { 2467 struct mvneta_port *pp = netdev_priv(dev); 2468 int queue; 2469 2470 for (queue = 0; queue < rxq_number; queue++) { 2471 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 2472 rxq->time_coal = c->rx_coalesce_usecs; 2473 rxq->pkts_coal = c->rx_max_coalesced_frames; 2474 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); 2475 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); 2476 } 2477 2478 for (queue = 0; queue < txq_number; queue++) { 2479 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 2480 txq->done_pkts_coal = c->tx_max_coalesced_frames; 2481 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); 2482 } 2483 2484 return 0; 2485 } 2486 2487 /* get coalescing for ethtools */ 2488 static int mvneta_ethtool_get_coalesce(struct net_device *dev, 2489 struct ethtool_coalesce *c) 2490 { 2491 struct mvneta_port *pp = netdev_priv(dev); 2492 2493 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; 2494 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; 2495 2496 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; 2497 return 0; 2498 } 2499 2500 2501 static void mvneta_ethtool_get_drvinfo(struct net_device *dev, 2502 struct ethtool_drvinfo *drvinfo) 2503 { 2504 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, 2505 sizeof(drvinfo->driver)); 2506 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, 2507 sizeof(drvinfo->version)); 2508 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 2509 sizeof(drvinfo->bus_info)); 2510 } 2511 2512 2513 static void mvneta_ethtool_get_ringparam(struct net_device *netdev, 2514 struct ethtool_ringparam *ring) 2515 { 2516 struct mvneta_port *pp = netdev_priv(netdev); 2517 2518 ring->rx_max_pending = MVNETA_MAX_RXD; 2519 ring->tx_max_pending = MVNETA_MAX_TXD; 2520 ring->rx_pending = pp->rx_ring_size; 2521 ring->tx_pending = pp->tx_ring_size; 2522 } 2523 2524 static int mvneta_ethtool_set_ringparam(struct net_device *dev, 2525 struct ethtool_ringparam *ring) 2526 { 2527 struct mvneta_port *pp = netdev_priv(dev); 2528 2529 if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) 2530 return -EINVAL; 2531 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
2532 ring->rx_pending : MVNETA_MAX_RXD; 2533 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ? 2534 ring->tx_pending : MVNETA_MAX_TXD; 2535 2536 if (netif_running(dev)) { 2537 mvneta_stop(dev); 2538 if (mvneta_open(dev)) { 2539 netdev_err(dev, 2540 "error on opening device after ring param change\n"); 2541 return -ENOMEM; 2542 } 2543 } 2544 2545 return 0; 2546 } 2547 2548 static const struct net_device_ops mvneta_netdev_ops = { 2549 .ndo_open = mvneta_open, 2550 .ndo_stop = mvneta_stop, 2551 .ndo_start_xmit = mvneta_tx, 2552 .ndo_set_rx_mode = mvneta_set_rx_mode, 2553 .ndo_set_mac_address = mvneta_set_mac_addr, 2554 .ndo_change_mtu = mvneta_change_mtu, 2555 .ndo_tx_timeout = mvneta_tx_timeout, 2556 .ndo_get_stats64 = mvneta_get_stats64, 2557 }; 2558 2559 const struct ethtool_ops mvneta_eth_tool_ops = { 2560 .get_link = ethtool_op_get_link, 2561 .get_settings = mvneta_ethtool_get_settings, 2562 .set_settings = mvneta_ethtool_set_settings, 2563 .set_coalesce = mvneta_ethtool_set_coalesce, 2564 .get_coalesce = mvneta_ethtool_get_coalesce, 2565 .get_drvinfo = mvneta_ethtool_get_drvinfo, 2566 .get_ringparam = mvneta_ethtool_get_ringparam, 2567 .set_ringparam = mvneta_ethtool_set_ringparam, 2568 }; 2569 2570 /* Initialize hw */ 2571 static int mvneta_init(struct mvneta_port *pp, int phy_addr) 2572 { 2573 int queue; 2574 2575 /* Disable port */ 2576 mvneta_port_disable(pp); 2577 2578 /* Set port default values */ 2579 mvneta_defaults_set(pp); 2580 2581 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue), 2582 GFP_KERNEL); 2583 if (!pp->txqs) 2584 return -ENOMEM; 2585 2586 /* Initialize TX descriptor rings */ 2587 for (queue = 0; queue < txq_number; queue++) { 2588 struct mvneta_tx_queue *txq = &pp->txqs[queue]; 2589 txq->id = queue; 2590 txq->size = pp->tx_ring_size; 2591 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; 2592 } 2593 2594 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue), 2595 GFP_KERNEL); 2596 if (!pp->rxqs) { 2597 kfree(pp->txqs); 2598 return -ENOMEM; 2599 } 2600 2601 /* Create Rx descriptor rings */ 2602 for (queue = 0; queue < rxq_number; queue++) { 2603 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; 2604 rxq->id = queue; 2605 rxq->size = pp->rx_ring_size; 2606 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; 2607 rxq->time_coal = MVNETA_RX_COAL_USEC; 2608 } 2609 2610 return 0; 2611 } 2612 2613 static void mvneta_deinit(struct mvneta_port *pp) 2614 { 2615 kfree(pp->txqs); 2616 kfree(pp->rxqs); 2617 } 2618 2619 /* platform glue : initialize decoding windows */ 2620 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, 2621 const struct mbus_dram_target_info *dram) 2622 { 2623 u32 win_enable; 2624 u32 win_protect; 2625 int i; 2626 2627 for (i = 0; i < 6; i++) { 2628 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); 2629 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); 2630 2631 if (i < 4) 2632 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); 2633 } 2634 2635 win_enable = 0x3f; 2636 win_protect = 0; 2637 2638 for (i = 0; i < dram->num_cs; i++) { 2639 const struct mbus_dram_window *cs = dram->cs + i; 2640 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | 2641 (cs->mbus_attr << 8) | dram->mbus_dram_target_id); 2642 2643 mvreg_write(pp, MVNETA_WIN_SIZE(i), 2644 (cs->size - 1) & 0xffff0000); 2645 2646 win_enable &= ~(1 << i); 2647 win_protect |= 3 << (2 * i); 2648 } 2649 2650 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); 2651 } 2652 2653 /* Power up the port */ 2654 static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 2655 { 2656 u32 val; 2657 2658 
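	/* Power-up sequence: clear stale unit interrupt causes, apply the
	 * SGMII/RGMII configuration and then take the port out of reset.
	 */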
/* MAC Cause register should be cleared */ 2659 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 2660 2661 if (phy_mode == PHY_INTERFACE_MODE_SGMII) 2662 mvneta_port_sgmii_config(pp); 2663 2664 mvneta_gmac_rgmii_set(pp, 1); 2665 2666 /* Cancel Port Reset */ 2667 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 2668 val &= ~MVNETA_GMAC2_PORT_RESET; 2669 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); 2670 2671 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 2672 MVNETA_GMAC2_PORT_RESET) != 0) 2673 continue; 2674 } 2675 2676 /* Device initialization routine */ 2677 static int mvneta_probe(struct platform_device *pdev) 2678 { 2679 const struct mbus_dram_target_info *dram_target_info; 2680 struct device_node *dn = pdev->dev.of_node; 2681 struct device_node *phy_node; 2682 u32 phy_addr; 2683 struct mvneta_port *pp; 2684 struct net_device *dev; 2685 const char *dt_mac_addr; 2686 char hw_mac_addr[ETH_ALEN]; 2687 const char *mac_from; 2688 int phy_mode; 2689 int err; 2690 2691 /* Our multiqueue support is not complete, so for now, only 2692 * allow the usage of the first RX queue 2693 */ 2694 if (rxq_def != 0) { 2695 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def); 2696 return -EINVAL; 2697 } 2698 2699 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); 2700 if (!dev) 2701 return -ENOMEM; 2702 2703 dev->irq = irq_of_parse_and_map(dn, 0); 2704 if (dev->irq == 0) { 2705 err = -EINVAL; 2706 goto err_free_netdev; 2707 } 2708 2709 phy_node = of_parse_phandle(dn, "phy", 0); 2710 if (!phy_node) { 2711 dev_err(&pdev->dev, "no associated PHY\n"); 2712 err = -ENODEV; 2713 goto err_free_irq; 2714 } 2715 2716 phy_mode = of_get_phy_mode(dn); 2717 if (phy_mode < 0) { 2718 dev_err(&pdev->dev, "incorrect phy-mode\n"); 2719 err = -EINVAL; 2720 goto err_free_irq; 2721 } 2722 2723 dev->tx_queue_len = MVNETA_MAX_TXD; 2724 dev->watchdog_timeo = 5 * HZ; 2725 dev->netdev_ops = &mvneta_netdev_ops; 2726 2727 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops); 2728 2729 pp = netdev_priv(dev); 2730 2731 pp->tx_done_timer.function = mvneta_tx_done_timer_callback; 2732 init_timer(&pp->tx_done_timer); 2733 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); 2734 2735 pp->weight = MVNETA_RX_POLL_WEIGHT; 2736 pp->phy_node = phy_node; 2737 pp->phy_interface = phy_mode; 2738 2739 pp->base = of_iomap(dn, 0); 2740 if (pp->base == NULL) { 2741 err = -ENOMEM; 2742 goto err_free_irq; 2743 } 2744 2745 pp->clk = devm_clk_get(&pdev->dev, NULL); 2746 if (IS_ERR(pp->clk)) { 2747 err = PTR_ERR(pp->clk); 2748 goto err_unmap; 2749 } 2750 2751 clk_prepare_enable(pp->clk); 2752 2753 dt_mac_addr = of_get_mac_address(dn); 2754 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { 2755 mac_from = "device tree"; 2756 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); 2757 } else { 2758 mvneta_get_mac_addr(pp, hw_mac_addr); 2759 if (is_valid_ether_addr(hw_mac_addr)) { 2760 mac_from = "hardware"; 2761 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); 2762 } else { 2763 mac_from = "random"; 2764 eth_hw_addr_random(dev); 2765 } 2766 } 2767 2768 pp->tx_done_timer.data = (unsigned long)dev; 2769 2770 pp->tx_ring_size = MVNETA_MAX_TXD; 2771 pp->rx_ring_size = MVNETA_MAX_RXD; 2772 2773 pp->dev = dev; 2774 SET_NETDEV_DEV(dev, &pdev->dev); 2775 2776 err = mvneta_init(pp, phy_addr); 2777 if (err < 0) { 2778 dev_err(&pdev->dev, "can't init eth hal\n"); 2779 goto err_clk; 2780 } 2781 mvneta_port_power_up(pp, phy_mode); 2782 2783 dram_target_info = mv_mbus_dram_info(); 2784 if (dram_target_info) 2785 mvneta_conf_mbus_windows(pp, dram_target_info); 2786 2787 
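	/* Register the NAPI context serviced by mvneta_poll(); the weight
	 * bounds how many RX packets may be processed per poll.
	 */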
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); 2788 2789 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2790 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2791 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2792 dev->priv_flags |= IFF_UNICAST_FLT; 2793 2794 err = register_netdev(dev); 2795 if (err < 0) { 2796 dev_err(&pdev->dev, "failed to register\n"); 2797 goto err_deinit; 2798 } 2799 2800 netdev_info(dev, "Using %s mac address %pM\n", mac_from, 2801 dev->dev_addr); 2802 2803 platform_set_drvdata(pdev, pp->dev); 2804 2805 return 0; 2806 2807 err_deinit: 2808 mvneta_deinit(pp); 2809 err_clk: 2810 clk_disable_unprepare(pp->clk); 2811 err_unmap: 2812 iounmap(pp->base); 2813 err_free_irq: 2814 irq_dispose_mapping(dev->irq); 2815 err_free_netdev: 2816 free_netdev(dev); 2817 return err; 2818 } 2819 2820 /* Device removal routine */ 2821 static int mvneta_remove(struct platform_device *pdev) 2822 { 2823 struct net_device *dev = platform_get_drvdata(pdev); 2824 struct mvneta_port *pp = netdev_priv(dev); 2825 2826 unregister_netdev(dev); 2827 mvneta_deinit(pp); 2828 clk_disable_unprepare(pp->clk); 2829 iounmap(pp->base); 2830 irq_dispose_mapping(dev->irq); 2831 free_netdev(dev); 2832 2833 return 0; 2834 } 2835 2836 static const struct of_device_id mvneta_match[] = { 2837 { .compatible = "marvell,armada-370-neta" }, 2838 { } 2839 }; 2840 MODULE_DEVICE_TABLE(of, mvneta_match); 2841 2842 static struct platform_driver mvneta_driver = { 2843 .probe = mvneta_probe, 2844 .remove = mvneta_remove, 2845 .driver = { 2846 .name = MVNETA_DRIVER_NAME, 2847 .of_match_table = mvneta_match, 2848 }, 2849 }; 2850 2851 module_platform_driver(mvneta_driver); 2852 2853 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 2854 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 2855 MODULE_LICENSE("GPL"); 2856 2857 module_param(rxq_number, int, S_IRUGO); 2858 module_param(txq_number, int, S_IRUGO); 2859 2860 module_param(rxq_def, int, S_IRUGO); 2861
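/* Illustrative addition (not in the original driver): describe the module
 * parameters above so they show up in modinfo. The descriptions are
 * inferred from how the parameters are used in this file.
 */
MODULE_PARM_DESC(rxq_number, "Number of RX queues to use");
MODULE_PARM_DESC(txq_number, "Number of TX queues to use");
MODULE_PARM_DESC(rxq_def, "Default RX queue (only 0 is supported)");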