// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
		      u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}

void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
				u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[cpu] + offset);
}
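/* Descriptor accessors. PPv2.1 and PPv2.2 use different descriptor
 * layouts: PPv2.1 stores 32-bit DMA addresses, while PPv2.2 packs a
 * wider DMA address (masked by MVPP2_DESC_DMA_MASK) together with
 * PTP/key-hash fields in a single 64-bit word. All descriptor fields
 * are therefore read and written through the helpers below.
 */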
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */
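/* Note: each BM pool entry holds a (DMA address, cookie) pointer pair,
 * which is why the pool's backing memory is sized as two words per
 * buffer pointer (32-bit words on PPv2.1, 64-bit words on PPv2.2).
 */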
/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
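/* Note: the number of buffers a pool currently holds is spread over two
 * hardware counters: pointers kept in the pool's external (DRAM) area
 * (MVPP2_BM_POOL_PTRS_NUM_REG) and pointers kept in the BM's internal
 * BPPI cache (MVPP2_BM_BPPI_PTRS_NUM_REG); the helper below sums both.
 */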
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv,
				  struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
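/* Note: on PPv2.2 the high-order address bits are staged in
 * MVPP22_BM_ADDR_HIGH_RLS_REG first, so that the 32-bit release writes
 * that follow in the helper below are extended with them.
 */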
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write_relaxed(port->priv, cpu,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor. Instead of
	 * storing the virtual address, we store the physical address.
	 */
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write_relaxed(port->priv, cpu,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as a specific type and return
 * the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[pool].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
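/* For example, with a default MTU the port's pkt_size stays at or below
 * MVPP2_BM_LONG_PKT_SIZE, so the HW long/short pools map to the SW
 * long/short pools; raising the MTU beyond 1518B shifts the mapping one
 * step up (jumbo/long), as described in the comment below.
 */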
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current CPU's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * so using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * so using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_percpu_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
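/* Bring up the XPCS and MPCS blocks used for 10GBASE-KR: select two
 * active XPCS lanes, clear MVPP22_MPCS_CTRL_FWD_ERR_CONN, then
 * re-program the MPCS clock divider and release the MAC/serdes resets.
 */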
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	/* XPCS */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	/* MPCS */
	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	enum phy_mode mode;
	int ret;

	if (!port->comphy)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		mode = PHY_MODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mode = PHY_MODE_2500SGMII;
		break;
	case PHY_INTERFACE_MODE_10GKR:
		mode = PHY_MODE_10GKR;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_set_mode(port->comphy, mode);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);

		/* Disable & reset should be done separately */
		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Software and hardware statistics are, by design, incremented at different
 * points in the packet processing chain: incoming packets may be dropped
 * after being counted by hardware but before reaching the software counters
 * (most likely multicast packets), and in the opposite direction, on
 * transmit, FCS bytes are added in between and TSO skbs are split with
 * header bytes added. Hence, statistics gathered from userspace with
 * ifconfig (software) and ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
	}
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
	u64 *pstats;
	int i;

	mutex_lock(&port->gather_stats_lock);

	pstats = port->ethtool_stats;
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	/* No need to read the counters again right after this function if it
	 * was called asynchronously by the user (i.e. via ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvpp2_ethtool_regs);

	return -EOPNOTSUPP;
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;
	unsigned int i;

	/* Read the GOP statistics to reset the hardware counters */
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
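/* Note: in the two helpers below the maximum receive size is programmed
 * in units of two bytes, hence the division by two of the packet size
 * (less the two-byte Marvell header, MVPP2_MH_SIZE) before writing the
 * field.
 */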
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}
/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read_relaxed(priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
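/* Note on the accounting below: the sum of per-CPU counts, per-CPU
 * reservations and the requested chunk is compared against the queue
 * size minus one MVPP2_CPU_DESC_CHUNK per present CPU, keeping a safety
 * margin so every CPU can always reserve at least one more chunk.
 */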
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the reservation could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

/* For some reason in the LSP this is done on each CPU. Why? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
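/* For example, assuming a 250 MHz tclk (a common value on these SoCs,
 * not guaranteed here), mvpp2_usec_to_cycles(100, 250000000) yields
 * 250000000 * 100 / 1000000 = 25000 clock cycles.
 */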
U32_MAX : tmp; 1895 } 1896 1897 /* Set the time delay in usec before Rx interrupt */ 1898 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 1899 struct mvpp2_rx_queue *rxq) 1900 { 1901 unsigned long freq = port->priv->tclk; 1902 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 1903 1904 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 1905 rxq->time_coal = 1906 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 1907 1908 /* re-evaluate to get actual register value */ 1909 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 1910 } 1911 1912 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 1913 } 1914 1915 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 1916 { 1917 unsigned long freq = port->priv->tclk; 1918 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 1919 1920 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 1921 port->tx_time_coal = 1922 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 1923 1924 /* re-evaluate to get actual register value */ 1925 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 1926 } 1927 1928 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 1929 } 1930 1931 /* Free Tx queue skbuffs */ 1932 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 1933 struct mvpp2_tx_queue *txq, 1934 struct mvpp2_txq_pcpu *txq_pcpu, int num) 1935 { 1936 int i; 1937 1938 for (i = 0; i < num; i++) { 1939 struct mvpp2_txq_pcpu_buf *tx_buf = 1940 txq_pcpu->buffs + txq_pcpu->txq_get_index; 1941 1942 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) 1943 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 1944 tx_buf->size, DMA_TO_DEVICE); 1945 if (tx_buf->skb) 1946 dev_kfree_skb_any(tx_buf->skb); 1947 1948 mvpp2_txq_inc_get(txq_pcpu); 1949 } 1950 } 1951 1952 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 1953 u32 cause) 1954 { 1955 int queue = fls(cause) - 1; 1956 1957 return port->rxqs[queue]; 1958 } 1959 1960 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 1961 u32 cause) 1962 { 1963 int queue = fls(cause) - 1; 1964 1965 return port->txqs[queue]; 1966 } 1967 1968 /* Handle end of transmission */ 1969 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 1970 struct mvpp2_txq_pcpu *txq_pcpu) 1971 { 1972 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 1973 int tx_done; 1974 1975 if (txq_pcpu->cpu != smp_processor_id()) 1976 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 1977 1978 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 1979 if (!tx_done) 1980 return; 1981 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 1982 1983 txq_pcpu->count -= tx_done; 1984 1985 if (netif_tx_queue_stopped(nq)) 1986 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 1987 netif_tx_wake_queue(nq); 1988 } 1989 1990 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 1991 int cpu) 1992 { 1993 struct mvpp2_tx_queue *txq; 1994 struct mvpp2_txq_pcpu *txq_pcpu; 1995 unsigned int tx_todo = 0; 1996 1997 while (cause) { 1998 txq = mvpp2_get_tx_queue(port, cause); 1999 if (!txq) 2000 break; 2001 2002 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 2003 2004 if (txq_pcpu->count) { 2005 mvpp2_txq_done(port, txq, txq_pcpu); 2006 tx_todo += txq_pcpu->count; 2007 } 2008 2009 cause &= ~(1 << txq->log_id); 2010 } 2011 return tx_todo; 2012 } 2013 2014 /* Rx/Tx queue initialization/cleanup methods */ 2015 2016 /* Allocate and initialize descriptors for aggr TXQ */ 2017 static int mvpp2_aggr_txq_init(struct platform_device 
*pdev, 2018 struct mvpp2_tx_queue *aggr_txq, int cpu, 2019 struct mvpp2 *priv) 2020 { 2021 u32 txq_dma; 2022 2023 /* Allocate memory for TX descriptors */ 2024 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 2025 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2026 &aggr_txq->descs_dma, GFP_KERNEL); 2027 if (!aggr_txq->descs) 2028 return -ENOMEM; 2029 2030 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 2031 2032 /* Aggr TXQ no reset WA */ 2033 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 2034 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 2035 2036 /* Set Tx descriptors queue starting address indirect 2037 * access 2038 */ 2039 if (priv->hw_version == MVPP21) 2040 txq_dma = aggr_txq->descs_dma; 2041 else 2042 txq_dma = aggr_txq->descs_dma >> 2043 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 2044 2045 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 2046 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), 2047 MVPP2_AGGR_TXQ_SIZE); 2048 2049 return 0; 2050 } 2051 2052 /* Create a specified Rx queue */ 2053 static int mvpp2_rxq_init(struct mvpp2_port *port, 2054 struct mvpp2_rx_queue *rxq) 2055 2056 { 2057 u32 rxq_dma; 2058 int cpu; 2059 2060 rxq->size = port->rx_ring_size; 2061 2062 /* Allocate memory for RX descriptors */ 2063 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 2064 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2065 &rxq->descs_dma, GFP_KERNEL); 2066 if (!rxq->descs) 2067 return -ENOMEM; 2068 2069 rxq->last_desc = rxq->size - 1; 2070 2071 /* Zero occupied and non-occupied counters - direct access */ 2072 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2073 2074 /* Set Rx descriptors queue starting address - indirect access */ 2075 cpu = get_cpu(); 2076 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 2077 if (port->priv->hw_version == MVPP21) 2078 rxq_dma = rxq->descs_dma; 2079 else 2080 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 2081 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 2082 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 2083 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); 2084 put_cpu(); 2085 2086 /* Set Offset */ 2087 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 2088 2089 /* Set coalescing pkts and time */ 2090 mvpp2_rx_pkts_coal_set(port, rxq); 2091 mvpp2_rx_time_coal_set(port, rxq); 2092 2093 /* Add number of descriptors ready for receiving packets */ 2094 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 2095 2096 return 0; 2097 } 2098 2099 /* Push packets received by the RXQ to BM pool */ 2100 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 2101 struct mvpp2_rx_queue *rxq) 2102 { 2103 int rx_received, i; 2104 2105 rx_received = mvpp2_rxq_received(port, rxq->id); 2106 if (!rx_received) 2107 return; 2108 2109 for (i = 0; i < rx_received; i++) { 2110 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 2111 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 2112 int pool; 2113 2114 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 2115 MVPP2_RXD_BM_POOL_ID_OFFS; 2116 2117 mvpp2_bm_pool_put(port, pool, 2118 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 2119 mvpp2_rxdesc_cookie_get(port, rx_desc)); 2120 } 2121 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 2122 } 2123 2124 /* Cleanup Rx queue */ 2125 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 2126 struct mvpp2_rx_queue *rxq) 2127 { 2128 int cpu; 2129 2130 mvpp2_rxq_drop_pkts(port, rxq); 2131 2132 if (rxq->descs) 2133 dma_free_coherent(port->dev->dev.parent, 2134 rxq->size * 
MVPP2_DESC_ALIGNED_SIZE, 2135 rxq->descs, 2136 rxq->descs_dma); 2137 2138 rxq->descs = NULL; 2139 rxq->last_desc = 0; 2140 rxq->next_desc_to_proc = 0; 2141 rxq->descs_dma = 0; 2142 2143 /* Clear Rx descriptors queue starting address and size; 2144 * free descriptor number 2145 */ 2146 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2147 cpu = get_cpu(); 2148 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 2149 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); 2150 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); 2151 put_cpu(); 2152 } 2153 2154 /* Create and initialize a Tx queue */ 2155 static int mvpp2_txq_init(struct mvpp2_port *port, 2156 struct mvpp2_tx_queue *txq) 2157 { 2158 u32 val; 2159 int cpu, desc, desc_per_txq, tx_port_num; 2160 struct mvpp2_txq_pcpu *txq_pcpu; 2161 2162 txq->size = port->tx_ring_size; 2163 2164 /* Allocate memory for Tx descriptors */ 2165 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 2166 txq->size * MVPP2_DESC_ALIGNED_SIZE, 2167 &txq->descs_dma, GFP_KERNEL); 2168 if (!txq->descs) 2169 return -ENOMEM; 2170 2171 txq->last_desc = txq->size - 1; 2172 2173 /* Set Tx descriptors queue starting address - indirect access */ 2174 cpu = get_cpu(); 2175 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 2176 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 2177 txq->descs_dma); 2178 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 2179 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 2180 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); 2181 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, 2182 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 2183 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); 2184 val &= ~MVPP2_TXQ_PENDING_MASK; 2185 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); 2186 2187 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 2188 * for each existing TXQ. 
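	 * For example, assuming MVPP2_MAX_TXQ is 8, port 1 / logical
	 * queue 2 would get the window starting at descriptor
	 * (1 * 8 * 16) + (2 * 16) = 160.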
2189 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 2190 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS 2191 */ 2192 desc_per_txq = 16; 2193 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 2194 (txq->log_id * desc_per_txq); 2195 2196 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 2197 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 2198 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 2199 put_cpu(); 2200 2201 /* WRR / EJP configuration - indirect access */ 2202 tx_port_num = mvpp2_egress_port(port); 2203 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2204 2205 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 2206 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 2207 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 2208 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 2209 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 2210 2211 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 2212 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 2213 val); 2214 2215 for_each_present_cpu(cpu) { 2216 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 2217 txq_pcpu->size = txq->size; 2218 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, 2219 sizeof(*txq_pcpu->buffs), 2220 GFP_KERNEL); 2221 if (!txq_pcpu->buffs) 2222 return -ENOMEM; 2223 2224 txq_pcpu->count = 0; 2225 txq_pcpu->reserved_num = 0; 2226 txq_pcpu->txq_put_index = 0; 2227 txq_pcpu->txq_get_index = 0; 2228 txq_pcpu->tso_headers = NULL; 2229 2230 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; 2231 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; 2232 2233 txq_pcpu->tso_headers = 2234 dma_alloc_coherent(port->dev->dev.parent, 2235 txq_pcpu->size * TSO_HEADER_SIZE, 2236 &txq_pcpu->tso_headers_dma, 2237 GFP_KERNEL); 2238 if (!txq_pcpu->tso_headers) 2239 return -ENOMEM; 2240 } 2241 2242 return 0; 2243 } 2244 2245 /* Free allocated TXQ resources */ 2246 static void mvpp2_txq_deinit(struct mvpp2_port *port, 2247 struct mvpp2_tx_queue *txq) 2248 { 2249 struct mvpp2_txq_pcpu *txq_pcpu; 2250 int cpu; 2251 2252 for_each_present_cpu(cpu) { 2253 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 2254 kfree(txq_pcpu->buffs); 2255 2256 if (txq_pcpu->tso_headers) 2257 dma_free_coherent(port->dev->dev.parent, 2258 txq_pcpu->size * TSO_HEADER_SIZE, 2259 txq_pcpu->tso_headers, 2260 txq_pcpu->tso_headers_dma); 2261 2262 txq_pcpu->tso_headers = NULL; 2263 } 2264 2265 if (txq->descs) 2266 dma_free_coherent(port->dev->dev.parent, 2267 txq->size * MVPP2_DESC_ALIGNED_SIZE, 2268 txq->descs, txq->descs_dma); 2269 2270 txq->descs = NULL; 2271 txq->last_desc = 0; 2272 txq->next_desc_to_proc = 0; 2273 txq->descs_dma = 0; 2274 2275 /* Set minimum bandwidth for disabled TXQs */ 2276 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 2277 2278 /* Set Tx descriptors queue starting address and size */ 2279 cpu = get_cpu(); 2280 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 2281 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); 2282 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); 2283 put_cpu(); 2284 } 2285 2286 /* Cleanup Tx ports */ 2287 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 2288 { 2289 struct mvpp2_txq_pcpu *txq_pcpu; 2290 int delay, pending, cpu; 2291 u32 val; 2292 2293 cpu = get_cpu(); 2294 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 2295 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); 2296 val |= MVPP2_TXQ_DRAIN_EN_MASK; 2297 
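	/* Request draining of the queue; the loop below then polls the
	 * pending-descriptor counter until it reaches zero or the
	 * timeout expires.
	 */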
mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 2298 2299 /* The napi queue has been stopped so wait for all packets 2300 * to be transmitted. 2301 */ 2302 delay = 0; 2303 do { 2304 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 2305 netdev_warn(port->dev, 2306 "port %d: cleaning queue %d timed out\n", 2307 port->id, txq->log_id); 2308 break; 2309 } 2310 mdelay(1); 2311 delay++; 2312 2313 pending = mvpp2_percpu_read(port->priv, cpu, 2314 MVPP2_TXQ_PENDING_REG); 2315 pending &= MVPP2_TXQ_PENDING_MASK; 2316 } while (pending); 2317 2318 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 2319 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 2320 put_cpu(); 2321 2322 for_each_present_cpu(cpu) { 2323 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 2324 2325 /* Release all packets */ 2326 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 2327 2328 /* Reset queue */ 2329 txq_pcpu->count = 0; 2330 txq_pcpu->txq_put_index = 0; 2331 txq_pcpu->txq_get_index = 0; 2332 } 2333 } 2334 2335 /* Cleanup all Tx queues */ 2336 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 2337 { 2338 struct mvpp2_tx_queue *txq; 2339 int queue; 2340 u32 val; 2341 2342 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 2343 2344 /* Reset Tx ports and delete Tx queues */ 2345 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 2346 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 2347 2348 for (queue = 0; queue < port->ntxqs; queue++) { 2349 txq = port->txqs[queue]; 2350 mvpp2_txq_clean(port, txq); 2351 mvpp2_txq_deinit(port, txq); 2352 } 2353 2354 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 2355 2356 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 2357 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 2358 } 2359 2360 /* Cleanup all Rx queues */ 2361 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 2362 { 2363 int queue; 2364 2365 for (queue = 0; queue < port->nrxqs; queue++) 2366 mvpp2_rxq_deinit(port, port->rxqs[queue]); 2367 } 2368 2369 /* Init all Rx queues for port */ 2370 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 2371 { 2372 int queue, err; 2373 2374 for (queue = 0; queue < port->nrxqs; queue++) { 2375 err = mvpp2_rxq_init(port, port->rxqs[queue]); 2376 if (err) 2377 goto err_cleanup; 2378 } 2379 return 0; 2380 2381 err_cleanup: 2382 mvpp2_cleanup_rxqs(port); 2383 return err; 2384 } 2385 2386 /* Init all tx queues for port */ 2387 static int mvpp2_setup_txqs(struct mvpp2_port *port) 2388 { 2389 struct mvpp2_tx_queue *txq; 2390 int queue, err; 2391 2392 for (queue = 0; queue < port->ntxqs; queue++) { 2393 txq = port->txqs[queue]; 2394 err = mvpp2_txq_init(port, txq); 2395 if (err) 2396 goto err_cleanup; 2397 } 2398 2399 if (port->has_tx_irqs) { 2400 mvpp2_tx_time_coal_set(port); 2401 for (queue = 0; queue < port->ntxqs; queue++) { 2402 txq = port->txqs[queue]; 2403 mvpp2_tx_pkts_coal_set(port, txq); 2404 } 2405 } 2406 2407 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 2408 return 0; 2409 2410 err_cleanup: 2411 mvpp2_cleanup_txqs(port); 2412 return err; 2413 } 2414 2415 /* The callback for per-port interrupt */ 2416 static irqreturn_t mvpp2_isr(int irq, void *dev_id) 2417 { 2418 struct mvpp2_queue_vector *qv = dev_id; 2419 2420 mvpp2_qvec_interrupt_disable(qv); 2421 2422 napi_schedule(&qv->napi); 2423 2424 return IRQ_HANDLED; 2425 } 2426 2427 /* Per-port interrupt for link status changes */ 2428 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) 2429 { 2430 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; 2431 struct net_device *dev = port->dev; 
2432 bool event = false, link = false; 2433 u32 val; 2434 2435 mvpp22_gop_mask_irq(port); 2436 2437 if (port->gop_id == 0 && 2438 port->phy_interface == PHY_INTERFACE_MODE_10GKR) { 2439 val = readl(port->base + MVPP22_XLG_INT_STAT); 2440 if (val & MVPP22_XLG_INT_STAT_LINK) { 2441 event = true; 2442 val = readl(port->base + MVPP22_XLG_STATUS); 2443 if (val & MVPP22_XLG_STATUS_LINK_UP) 2444 link = true; 2445 } 2446 } else if (phy_interface_mode_is_rgmii(port->phy_interface) || 2447 port->phy_interface == PHY_INTERFACE_MODE_SGMII || 2448 port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || 2449 port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { 2450 val = readl(port->base + MVPP22_GMAC_INT_STAT); 2451 if (val & MVPP22_GMAC_INT_STAT_LINK) { 2452 event = true; 2453 val = readl(port->base + MVPP2_GMAC_STATUS0); 2454 if (val & MVPP2_GMAC_STATUS0_LINK_UP) 2455 link = true; 2456 } 2457 } 2458 2459 if (port->phylink) { 2460 phylink_mac_change(port->phylink, link); 2461 goto handled; 2462 } 2463 2464 if (!netif_running(dev) || !event) 2465 goto handled; 2466 2467 if (link) { 2468 mvpp2_interrupts_enable(port); 2469 2470 mvpp2_egress_enable(port); 2471 mvpp2_ingress_enable(port); 2472 netif_carrier_on(dev); 2473 netif_tx_wake_all_queues(dev); 2474 } else { 2475 netif_tx_stop_all_queues(dev); 2476 netif_carrier_off(dev); 2477 mvpp2_ingress_disable(port); 2478 mvpp2_egress_disable(port); 2479 2480 mvpp2_interrupts_disable(port); 2481 } 2482 2483 handled: 2484 mvpp22_gop_unmask_irq(port); 2485 return IRQ_HANDLED; 2486 } 2487 2488 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) 2489 { 2490 ktime_t interval; 2491 2492 if (!port_pcpu->timer_scheduled) { 2493 port_pcpu->timer_scheduled = true; 2494 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; 2495 hrtimer_start(&port_pcpu->tx_done_timer, interval, 2496 HRTIMER_MODE_REL_PINNED); 2497 } 2498 } 2499 2500 static void mvpp2_tx_proc_cb(unsigned long data) 2501 { 2502 struct net_device *dev = (struct net_device *)data; 2503 struct mvpp2_port *port = netdev_priv(dev); 2504 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); 2505 unsigned int tx_todo, cause; 2506 2507 if (!netif_running(dev)) 2508 return; 2509 port_pcpu->timer_scheduled = false; 2510 2511 /* Process all the Tx queues */ 2512 cause = (1 << port->ntxqs) - 1; 2513 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); 2514 2515 /* Set the timer in case not all the packets were processed */ 2516 if (tx_todo) 2517 mvpp2_timer_set(port_pcpu); 2518 } 2519 2520 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 2521 { 2522 struct mvpp2_port_pcpu *port_pcpu = container_of(timer, 2523 struct mvpp2_port_pcpu, 2524 tx_done_timer); 2525 2526 tasklet_schedule(&port_pcpu->tx_done_tasklet); 2527 2528 return HRTIMER_NORESTART; 2529 } 2530 2531 /* Main RX/TX processing routines */ 2532 2533 /* Display more error info */ 2534 static void mvpp2_rx_error(struct mvpp2_port *port, 2535 struct mvpp2_rx_desc *rx_desc) 2536 { 2537 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 2538 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 2539 char *err_str = NULL; 2540 2541 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 2542 case MVPP2_RXD_ERR_CRC: 2543 err_str = "crc"; 2544 break; 2545 case MVPP2_RXD_ERR_OVERRUN: 2546 err_str = "overrun"; 2547 break; 2548 case MVPP2_RXD_ERR_RESOURCE: 2549 err_str = "resource"; 2550 break; 2551 } 2552 if (err_str && net_ratelimit()) 2553 netdev_err(port->dev, 2554 "bad rx status %08x (%s error), size=%zu\n", 2555 status, err_str, sz); 2556 } 2557 
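/* A frame is only marked CHECKSUM_UNNECESSARY below when the descriptor
 * reports a valid L3 header (IPv4 without header error, or IPv6) and a
 * TCP/UDP L4 checksum that the hardware verified; anything else is
 * passed up as CHECKSUM_NONE for the stack to check.
 */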
2558 /* Handle RX checksum offload */ 2559 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, 2560 struct sk_buff *skb) 2561 { 2562 if (((status & MVPP2_RXD_L3_IP4) && 2563 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 2564 (status & MVPP2_RXD_L3_IP6)) 2565 if (((status & MVPP2_RXD_L4_UDP) || 2566 (status & MVPP2_RXD_L4_TCP)) && 2567 (status & MVPP2_RXD_L4_CSUM_OK)) { 2568 skb->csum = 0; 2569 skb->ip_summed = CHECKSUM_UNNECESSARY; 2570 return; 2571 } 2572 2573 skb->ip_summed = CHECKSUM_NONE; 2574 } 2575 2576 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 2577 static int mvpp2_rx_refill(struct mvpp2_port *port, 2578 struct mvpp2_bm_pool *bm_pool, int pool) 2579 { 2580 dma_addr_t dma_addr; 2581 phys_addr_t phys_addr; 2582 void *buf; 2583 2584 /* No recycle or too many buffers are in use, so allocate a new skb */ 2585 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, 2586 GFP_ATOMIC); 2587 if (!buf) 2588 return -ENOMEM; 2589 2590 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2591 2592 return 0; 2593 } 2594 2595 /* Handle tx checksum */ 2596 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) 2597 { 2598 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2599 int ip_hdr_len = 0; 2600 u8 l4_proto; 2601 2602 if (skb->protocol == htons(ETH_P_IP)) { 2603 struct iphdr *ip4h = ip_hdr(skb); 2604 2605 /* Calculate IPv4 checksum and L4 checksum */ 2606 ip_hdr_len = ip4h->ihl; 2607 l4_proto = ip4h->protocol; 2608 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2609 struct ipv6hdr *ip6h = ipv6_hdr(skb); 2610 2611 /* Read l4_protocol from one of IPv6 extra headers */ 2612 if (skb_network_header_len(skb) > 0) 2613 ip_hdr_len = (skb_network_header_len(skb) >> 2); 2614 l4_proto = ip6h->nexthdr; 2615 } else { 2616 return MVPP2_TXD_L4_CSUM_NOT; 2617 } 2618 2619 return mvpp2_txq_desc_csum(skb_network_offset(skb), 2620 skb->protocol, ip_hdr_len, l4_proto); 2621 } 2622 2623 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 2624 } 2625 2626 /* Main rx processing */ 2627 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, 2628 int rx_todo, struct mvpp2_rx_queue *rxq) 2629 { 2630 struct net_device *dev = port->dev; 2631 int rx_received; 2632 int rx_done = 0; 2633 u32 rcvd_pkts = 0; 2634 u32 rcvd_bytes = 0; 2635 2636 /* Get number of received packets and clamp the to-do */ 2637 rx_received = mvpp2_rxq_received(port, rxq->id); 2638 if (rx_todo > rx_received) 2639 rx_todo = rx_received; 2640 2641 while (rx_done < rx_todo) { 2642 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 2643 struct mvpp2_bm_pool *bm_pool; 2644 struct sk_buff *skb; 2645 unsigned int frag_size; 2646 dma_addr_t dma_addr; 2647 phys_addr_t phys_addr; 2648 u32 rx_status; 2649 int pool, rx_bytes, err; 2650 void *data; 2651 2652 rx_done++; 2653 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 2654 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 2655 rx_bytes -= MVPP2_MH_SIZE; 2656 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 2657 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 2658 data = (void *)phys_to_virt(phys_addr); 2659 2660 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> 2661 MVPP2_RXD_BM_POOL_ID_OFFS; 2662 bm_pool = &port->priv->bm_pools[pool]; 2663 2664 /* In case of an error, release the requested buffer pointer 2665 * to the Buffer Manager. This request process is controlled 2666 * by the hardware, and the information about the buffer is 2667 * comprised by the RX descriptor. 
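 * The BM pool id parsed from the status word above is all that is
 * needed to put the buffer back.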
2668 */ 2669 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 2670 err_drop_frame: 2671 dev->stats.rx_errors++; 2672 mvpp2_rx_error(port, rx_desc); 2673 /* Return the buffer to the pool */ 2674 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2675 continue; 2676 } 2677 2678 if (bm_pool->frag_size > PAGE_SIZE) 2679 frag_size = 0; 2680 else 2681 frag_size = bm_pool->frag_size; 2682 2683 skb = build_skb(data, frag_size); 2684 if (!skb) { 2685 netdev_warn(port->dev, "skb build failed\n"); 2686 goto err_drop_frame; 2687 } 2688 2689 err = mvpp2_rx_refill(port, bm_pool, pool); 2690 if (err) { 2691 netdev_err(port->dev, "failed to refill BM pools\n"); 2692 goto err_drop_frame; 2693 } 2694 2695 dma_unmap_single(dev->dev.parent, dma_addr, 2696 bm_pool->buf_size, DMA_FROM_DEVICE); 2697 2698 rcvd_pkts++; 2699 rcvd_bytes += rx_bytes; 2700 2701 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); 2702 skb_put(skb, rx_bytes); 2703 skb->protocol = eth_type_trans(skb, dev); 2704 mvpp2_rx_csum(port, rx_status, skb); 2705 2706 napi_gro_receive(napi, skb); 2707 } 2708 2709 if (rcvd_pkts) { 2710 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 2711 2712 u64_stats_update_begin(&stats->syncp); 2713 stats->rx_packets += rcvd_pkts; 2714 stats->rx_bytes += rcvd_bytes; 2715 u64_stats_update_end(&stats->syncp); 2716 } 2717 2718 /* Update Rx queue management counters */ 2719 wmb(); 2720 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); 2721 2722 return rx_todo; 2723 } 2724 2725 static inline void 2726 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 2727 struct mvpp2_tx_desc *desc) 2728 { 2729 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 2730 2731 dma_addr_t buf_dma_addr = 2732 mvpp2_txdesc_dma_addr_get(port, desc); 2733 size_t buf_sz = 2734 mvpp2_txdesc_size_get(port, desc); 2735 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 2736 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 2737 buf_sz, DMA_TO_DEVICE); 2738 mvpp2_txq_desc_put(txq); 2739 } 2740 2741 /* Handle tx fragmentation processing */ 2742 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, 2743 struct mvpp2_tx_queue *aggr_txq, 2744 struct mvpp2_tx_queue *txq) 2745 { 2746 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); 2747 struct mvpp2_tx_desc *tx_desc; 2748 int i; 2749 dma_addr_t buf_dma_addr; 2750 2751 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2752 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2753 void *addr = page_address(frag->page.p) + frag->page_offset; 2754 2755 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 2756 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 2757 mvpp2_txdesc_size_set(port, tx_desc, frag->size); 2758 2759 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, 2760 frag->size, DMA_TO_DEVICE); 2761 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { 2762 mvpp2_txq_desc_put(txq); 2763 goto cleanup; 2764 } 2765 2766 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 2767 2768 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 2769 /* Last descriptor */ 2770 mvpp2_txdesc_cmd_set(port, tx_desc, 2771 MVPP2_TXD_L_DESC); 2772 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 2773 } else { 2774 /* Descriptor in the middle: Not First, Not Last */ 2775 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 2776 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 2777 } 2778 } 2779 2780 return 0; 2781 cleanup: 2782 /* Release all descriptors that were used to map fragments of 2783 * this packet, as well as the corresponding DMA mappings 2784 */ 2785 for (i = i - 
1; i >= 0; i--) { 2786 tx_desc = txq->descs + i; 2787 tx_desc_unmap_put(port, txq, tx_desc); 2788 } 2789 2790 return -ENOMEM; 2791 } 2792 2793 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, 2794 struct net_device *dev, 2795 struct mvpp2_tx_queue *txq, 2796 struct mvpp2_tx_queue *aggr_txq, 2797 struct mvpp2_txq_pcpu *txq_pcpu, 2798 int hdr_sz) 2799 { 2800 struct mvpp2_port *port = netdev_priv(dev); 2801 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 2802 dma_addr_t addr; 2803 2804 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 2805 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); 2806 2807 addr = txq_pcpu->tso_headers_dma + 2808 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 2809 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); 2810 2811 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | 2812 MVPP2_TXD_F_DESC | 2813 MVPP2_TXD_PADDING_DISABLE); 2814 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 2815 } 2816 2817 static inline int mvpp2_tso_put_data(struct sk_buff *skb, 2818 struct net_device *dev, struct tso_t *tso, 2819 struct mvpp2_tx_queue *txq, 2820 struct mvpp2_tx_queue *aggr_txq, 2821 struct mvpp2_txq_pcpu *txq_pcpu, 2822 int sz, bool left, bool last) 2823 { 2824 struct mvpp2_port *port = netdev_priv(dev); 2825 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 2826 dma_addr_t buf_dma_addr; 2827 2828 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 2829 mvpp2_txdesc_size_set(port, tx_desc, sz); 2830 2831 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, 2832 DMA_TO_DEVICE); 2833 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 2834 mvpp2_txq_desc_put(txq); 2835 return -ENOMEM; 2836 } 2837 2838 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 2839 2840 if (!left) { 2841 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); 2842 if (last) { 2843 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 2844 return 0; 2845 } 2846 } else { 2847 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 2848 } 2849 2850 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 2851 return 0; 2852 } 2853 2854 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, 2855 struct mvpp2_tx_queue *txq, 2856 struct mvpp2_tx_queue *aggr_txq, 2857 struct mvpp2_txq_pcpu *txq_pcpu) 2858 { 2859 struct mvpp2_port *port = netdev_priv(dev); 2860 struct tso_t tso; 2861 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); 2862 int i, len, descs = 0; 2863 2864 /* Check number of available descriptors */ 2865 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, 2866 tso_count_descs(skb)) || 2867 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, 2868 tso_count_descs(skb))) 2869 return 0; 2870 2871 tso_start(skb, &tso); 2872 len = skb->len - hdr_sz; 2873 while (len > 0) { 2874 int left = min_t(int, skb_shinfo(skb)->gso_size, len); 2875 char *hdr = txq_pcpu->tso_headers + 2876 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 2877 2878 len -= left; 2879 descs++; 2880 2881 tso_build_hdr(skb, hdr, &tso, left, len == 0); 2882 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); 2883 2884 while (left > 0) { 2885 int sz = min_t(int, tso.size, left); 2886 left -= sz; 2887 descs++; 2888 2889 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, 2890 txq_pcpu, sz, left, len == 0)) 2891 goto release; 2892 tso_build_data(skb, &tso, sz); 2893 } 2894 } 2895 2896 return descs; 2897 2898 release: 2899 for (i = descs - 1; i >= 0; i--) { 2900 struct mvpp2_tx_desc *tx_desc = txq->descs + i; 2901 tx_desc_unmap_put(port, txq, tx_desc); 2902 } 2903 
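	/* Returning 0 makes mvpp2_tx() treat the skb as unsent: it is
	 * freed and accounted as tx_dropped.
	 */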
return 0; 2904 } 2905 2906 /* Main tx processing */ 2907 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) 2908 { 2909 struct mvpp2_port *port = netdev_priv(dev); 2910 struct mvpp2_tx_queue *txq, *aggr_txq; 2911 struct mvpp2_txq_pcpu *txq_pcpu; 2912 struct mvpp2_tx_desc *tx_desc; 2913 dma_addr_t buf_dma_addr; 2914 int frags = 0; 2915 u16 txq_id; 2916 u32 tx_cmd; 2917 2918 txq_id = skb_get_queue_mapping(skb); 2919 txq = port->txqs[txq_id]; 2920 txq_pcpu = this_cpu_ptr(txq->pcpu); 2921 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 2922 2923 if (skb_is_gso(skb)) { 2924 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); 2925 goto out; 2926 } 2927 frags = skb_shinfo(skb)->nr_frags + 1; 2928 2929 /* Check number of available descriptors */ 2930 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || 2931 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, 2932 txq_pcpu, frags)) { 2933 frags = 0; 2934 goto out; 2935 } 2936 2937 /* Get a descriptor for the first part of the packet */ 2938 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 2939 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 2940 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); 2941 2942 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, 2943 skb_headlen(skb), DMA_TO_DEVICE); 2944 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 2945 mvpp2_txq_desc_put(txq); 2946 frags = 0; 2947 goto out; 2948 } 2949 2950 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 2951 2952 tx_cmd = mvpp2_skb_tx_csum(port, skb); 2953 2954 if (frags == 1) { 2955 /* First and Last descriptor */ 2956 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 2957 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 2958 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 2959 } else { 2960 /* First but not Last */ 2961 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 2962 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 2963 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 2964 2965 /* Continue with other skb fragments */ 2966 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 2967 tx_desc_unmap_put(port, txq, tx_desc); 2968 frags = 0; 2969 } 2970 } 2971 2972 out: 2973 if (frags > 0) { 2974 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 2975 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 2976 2977 txq_pcpu->reserved_num -= frags; 2978 txq_pcpu->count += frags; 2979 aggr_txq->count += frags; 2980 2981 /* Enable transmit */ 2982 wmb(); 2983 mvpp2_aggr_txq_pend_desc_add(port, frags); 2984 2985 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 2986 netif_tx_stop_queue(nq); 2987 2988 u64_stats_update_begin(&stats->syncp); 2989 stats->tx_packets++; 2990 stats->tx_bytes += skb->len; 2991 u64_stats_update_end(&stats->syncp); 2992 } else { 2993 dev->stats.tx_dropped++; 2994 dev_kfree_skb_any(skb); 2995 } 2996 2997 /* Finalize TX processing */ 2998 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 2999 mvpp2_txq_done(port, txq, txq_pcpu); 3000 3001 /* Set the timer in case not all frags were processed */ 3002 if (!port->has_tx_irqs && txq_pcpu->count <= frags && 3003 txq_pcpu->count > 0) { 3004 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); 3005 3006 mvpp2_timer_set(port_pcpu); 3007 } 3008 3009 return NETDEV_TX_OK; 3010 } 3011 3012 static inline void mvpp2_cause_error(struct net_device *dev, int cause) 3013 { 3014 if (cause & MVPP2_CAUSE_FCS_ERR_MASK) 3015 netdev_err(dev, "FCS error\n"); 3016 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) 3017 netdev_err(dev, "rx fifo 
overrun error\n"); 3018 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) 3019 netdev_err(dev, "tx fifo underrun error\n"); 3020 } 3021 3022 static int mvpp2_poll(struct napi_struct *napi, int budget) 3023 { 3024 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; 3025 int rx_done = 0; 3026 struct mvpp2_port *port = netdev_priv(napi->dev); 3027 struct mvpp2_queue_vector *qv; 3028 int cpu = smp_processor_id(); 3029 3030 qv = container_of(napi, struct mvpp2_queue_vector, napi); 3031 3032 /* Rx/Tx cause register 3033 * 3034 * Bits 0-15: each bit indicates received packets on the Rx queue 3035 * (bit 0 is for Rx queue 0). 3036 * 3037 * Bits 16-23: each bit indicates transmitted packets on the Tx queue 3038 * (bit 16 is for Tx queue 0). 3039 * 3040 * Each CPU has its own Rx/Tx cause register 3041 */ 3042 cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, 3043 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 3044 3045 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 3046 if (cause_misc) { 3047 mvpp2_cause_error(port->dev, cause_misc); 3048 3049 /* Clear the cause register */ 3050 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 3051 mvpp2_percpu_write(port->priv, cpu, 3052 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 3053 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 3054 } 3055 3056 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 3057 if (cause_tx) { 3058 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 3059 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 3060 } 3061 3062 /* Process RX packets */ 3063 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 3064 cause_rx <<= qv->first_rxq; 3065 cause_rx |= qv->pending_cause_rx; 3066 while (cause_rx && budget > 0) { 3067 int count; 3068 struct mvpp2_rx_queue *rxq; 3069 3070 rxq = mvpp2_get_rx_queue(port, cause_rx); 3071 if (!rxq) 3072 break; 3073 3074 count = mvpp2_rx(port, napi, budget, rxq); 3075 rx_done += count; 3076 budget -= count; 3077 if (budget > 0) { 3078 /* Clear the bit associated to this Rx queue 3079 * so that next iteration will continue from 3080 * the next Rx queue. 
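			 * Bits still set when the budget is exhausted
			 * are kept in pending_cause_rx and serviced on
			 * the next poll.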
3081 */ 3082 cause_rx &= ~(1 << rxq->logic_rxq); 3083 } 3084 } 3085 3086 if (budget > 0) { 3087 cause_rx = 0; 3088 napi_complete_done(napi, rx_done); 3089 3090 mvpp2_qvec_interrupt_enable(qv); 3091 } 3092 qv->pending_cause_rx = cause_rx; 3093 return rx_done; 3094 } 3095 3096 static void mvpp22_mode_reconfigure(struct mvpp2_port *port) 3097 { 3098 u32 ctrl3; 3099 3100 /* comphy reconfiguration */ 3101 mvpp22_comphy_init(port); 3102 3103 /* gop reconfiguration */ 3104 mvpp22_gop_init(port); 3105 3106 /* Only GOP port 0 has an XLG MAC */ 3107 if (port->gop_id == 0) { 3108 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); 3109 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3110 3111 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 3112 port->phy_interface == PHY_INTERFACE_MODE_10GKR) 3113 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; 3114 else 3115 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 3116 3117 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); 3118 } 3119 3120 if (port->gop_id == 0 && 3121 (port->phy_interface == PHY_INTERFACE_MODE_XAUI || 3122 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) 3123 mvpp2_xlg_max_rx_size_set(port); 3124 else 3125 mvpp2_gmac_max_rx_size_set(port); 3126 } 3127 3128 /* Set hw internals when starting port */ 3129 static void mvpp2_start_dev(struct mvpp2_port *port) 3130 { 3131 int i; 3132 3133 mvpp2_txp_max_tx_size_set(port); 3134 3135 for (i = 0; i < port->nqvecs; i++) 3136 napi_enable(&port->qvecs[i].napi); 3137 3138 /* Enable interrupts on all CPUs */ 3139 mvpp2_interrupts_enable(port); 3140 3141 if (port->priv->hw_version == MVPP22) 3142 mvpp22_mode_reconfigure(port); 3143 3144 if (port->phylink) { 3145 phylink_start(port->phylink); 3146 } else { 3147 /* Phylink isn't used as of now for ACPI, so the MAC has to be 3148 * configured manually when the interface is started. This will 3149 * be removed as soon as the phylink ACPI support lands in. 3150 */ 3151 struct phylink_link_state state = { 3152 .interface = port->phy_interface, 3153 .link = 1, 3154 }; 3155 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); 3156 } 3157 3158 netif_tx_start_all_queues(port->dev); 3159 } 3160 3161 /* Set hw internals when stopping port */ 3162 static void mvpp2_stop_dev(struct mvpp2_port *port) 3163 { 3164 int i; 3165 3166 /* Disable interrupts on all CPUs */ 3167 mvpp2_interrupts_disable(port); 3168 3169 for (i = 0; i < port->nqvecs; i++) 3170 napi_disable(&port->qvecs[i].napi); 3171 3172 if (port->phylink) 3173 phylink_stop(port->phylink); 3174 phy_power_off(port->comphy); 3175 } 3176 3177 static int mvpp2_check_ringparam_valid(struct net_device *dev, 3178 struct ethtool_ringparam *ring) 3179 { 3180 u16 new_rx_pending = ring->rx_pending; 3181 u16 new_tx_pending = ring->tx_pending; 3182 3183 if (ring->rx_pending == 0 || ring->tx_pending == 0) 3184 return -EINVAL; 3185 3186 if (ring->rx_pending > MVPP2_MAX_RXD_MAX) 3187 new_rx_pending = MVPP2_MAX_RXD_MAX; 3188 else if (!IS_ALIGNED(ring->rx_pending, 16)) 3189 new_rx_pending = ALIGN(ring->rx_pending, 16); 3190 3191 if (ring->tx_pending > MVPP2_MAX_TXD_MAX) 3192 new_tx_pending = MVPP2_MAX_TXD_MAX; 3193 else if (!IS_ALIGNED(ring->tx_pending, 32)) 3194 new_tx_pending = ALIGN(ring->tx_pending, 32); 3195 3196 /* The Tx ring size cannot be smaller than the minimum number of 3197 * descriptors needed for TSO. 
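	 * The corrected values are written back into @ring below, e.g. a
	 * requested rx_pending of 100 is first rounded up to 112, the
	 * next multiple of 16.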
3198 */ 3199 if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 3200 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 3201 3202 if (ring->rx_pending != new_rx_pending) { 3203 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", 3204 ring->rx_pending, new_rx_pending); 3205 ring->rx_pending = new_rx_pending; 3206 } 3207 3208 if (ring->tx_pending != new_tx_pending) { 3209 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", 3210 ring->tx_pending, new_tx_pending); 3211 ring->tx_pending = new_tx_pending; 3212 } 3213 3214 return 0; 3215 } 3216 3217 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 3218 { 3219 u32 mac_addr_l, mac_addr_m, mac_addr_h; 3220 3221 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 3222 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); 3223 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); 3224 addr[0] = (mac_addr_h >> 24) & 0xFF; 3225 addr[1] = (mac_addr_h >> 16) & 0xFF; 3226 addr[2] = (mac_addr_h >> 8) & 0xFF; 3227 addr[3] = mac_addr_h & 0xFF; 3228 addr[4] = mac_addr_m & 0xFF; 3229 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; 3230 } 3231 3232 static int mvpp2_irqs_init(struct mvpp2_port *port) 3233 { 3234 int err, i; 3235 3236 for (i = 0; i < port->nqvecs; i++) { 3237 struct mvpp2_queue_vector *qv = port->qvecs + i; 3238 3239 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 3240 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 3241 3242 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 3243 if (err) 3244 goto err; 3245 3246 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) 3247 irq_set_affinity_hint(qv->irq, 3248 cpumask_of(qv->sw_thread_id)); 3249 } 3250 3251 return 0; 3252 err: 3253 for (i = 0; i < port->nqvecs; i++) { 3254 struct mvpp2_queue_vector *qv = port->qvecs + i; 3255 3256 irq_set_affinity_hint(qv->irq, NULL); 3257 free_irq(qv->irq, qv); 3258 } 3259 3260 return err; 3261 } 3262 3263 static void mvpp2_irqs_deinit(struct mvpp2_port *port) 3264 { 3265 int i; 3266 3267 for (i = 0; i < port->nqvecs; i++) { 3268 struct mvpp2_queue_vector *qv = port->qvecs + i; 3269 3270 irq_set_affinity_hint(qv->irq, NULL); 3271 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 3272 free_irq(qv->irq, qv); 3273 } 3274 } 3275 3276 static bool mvpp22_rss_is_supported(void) 3277 { 3278 return queue_mode == MVPP2_QDIST_MULTI_MODE; 3279 } 3280 3281 static int mvpp2_open(struct net_device *dev) 3282 { 3283 struct mvpp2_port *port = netdev_priv(dev); 3284 struct mvpp2 *priv = port->priv; 3285 unsigned char mac_bcast[ETH_ALEN] = { 3286 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 3287 bool valid = false; 3288 int err; 3289 3290 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); 3291 if (err) { 3292 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 3293 return err; 3294 } 3295 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); 3296 if (err) { 3297 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); 3298 return err; 3299 } 3300 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); 3301 if (err) { 3302 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); 3303 return err; 3304 } 3305 err = mvpp2_prs_def_flow(port); 3306 if (err) { 3307 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 3308 return err; 3309 } 3310 3311 /* Allocate the Rx/Tx queues */ 3312 err = mvpp2_setup_rxqs(port); 3313 if (err) { 3314 netdev_err(port->dev, "cannot allocate Rx queues\n"); 3315 return err; 3316 } 3317 3318 err = mvpp2_setup_txqs(port); 3319 if (err) { 3320 netdev_err(port->dev, 
"cannot allocate Tx queues\n"); 3321 goto err_cleanup_rxqs; 3322 } 3323 3324 err = mvpp2_irqs_init(port); 3325 if (err) { 3326 netdev_err(port->dev, "cannot init IRQs\n"); 3327 goto err_cleanup_txqs; 3328 } 3329 3330 /* Phylink isn't supported yet in ACPI mode */ 3331 if (port->of_node) { 3332 err = phylink_of_phy_connect(port->phylink, port->of_node, 0); 3333 if (err) { 3334 netdev_err(port->dev, "could not attach PHY (%d)\n", 3335 err); 3336 goto err_free_irq; 3337 } 3338 3339 valid = true; 3340 } 3341 3342 if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { 3343 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, 3344 dev->name, port); 3345 if (err) { 3346 netdev_err(port->dev, "cannot request link IRQ %d\n", 3347 port->link_irq); 3348 goto err_free_irq; 3349 } 3350 3351 mvpp22_gop_setup_irq(port); 3352 3353 /* In default link is down */ 3354 netif_carrier_off(port->dev); 3355 3356 valid = true; 3357 } else { 3358 port->link_irq = 0; 3359 } 3360 3361 if (!valid) { 3362 netdev_err(port->dev, 3363 "invalid configuration: no dt or link IRQ"); 3364 goto err_free_irq; 3365 } 3366 3367 /* Unmask interrupts on all CPUs */ 3368 on_each_cpu(mvpp2_interrupts_unmask, port, 1); 3369 mvpp2_shared_interrupt_mask_unmask(port, false); 3370 3371 mvpp2_start_dev(port); 3372 3373 /* Start hardware statistics gathering */ 3374 queue_delayed_work(priv->stats_queue, &port->stats_work, 3375 MVPP2_MIB_COUNTERS_STATS_DELAY); 3376 3377 return 0; 3378 3379 err_free_irq: 3380 mvpp2_irqs_deinit(port); 3381 err_cleanup_txqs: 3382 mvpp2_cleanup_txqs(port); 3383 err_cleanup_rxqs: 3384 mvpp2_cleanup_rxqs(port); 3385 return err; 3386 } 3387 3388 static int mvpp2_stop(struct net_device *dev) 3389 { 3390 struct mvpp2_port *port = netdev_priv(dev); 3391 struct mvpp2_port_pcpu *port_pcpu; 3392 int cpu; 3393 3394 mvpp2_stop_dev(port); 3395 3396 /* Mask interrupts on all CPUs */ 3397 on_each_cpu(mvpp2_interrupts_mask, port, 1); 3398 mvpp2_shared_interrupt_mask_unmask(port, true); 3399 3400 if (port->phylink) 3401 phylink_disconnect_phy(port->phylink); 3402 if (port->link_irq) 3403 free_irq(port->link_irq, port); 3404 3405 mvpp2_irqs_deinit(port); 3406 if (!port->has_tx_irqs) { 3407 for_each_present_cpu(cpu) { 3408 port_pcpu = per_cpu_ptr(port->pcpu, cpu); 3409 3410 hrtimer_cancel(&port_pcpu->tx_done_timer); 3411 port_pcpu->timer_scheduled = false; 3412 tasklet_kill(&port_pcpu->tx_done_tasklet); 3413 } 3414 } 3415 mvpp2_cleanup_rxqs(port); 3416 mvpp2_cleanup_txqs(port); 3417 3418 cancel_delayed_work_sync(&port->stats_work); 3419 3420 return 0; 3421 } 3422 3423 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, 3424 struct netdev_hw_addr_list *list) 3425 { 3426 struct netdev_hw_addr *ha; 3427 int ret; 3428 3429 netdev_hw_addr_list_for_each(ha, list) { 3430 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); 3431 if (ret) 3432 return ret; 3433 } 3434 3435 return 0; 3436 } 3437 3438 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) 3439 { 3440 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 3441 mvpp2_prs_vid_enable_filtering(port); 3442 else 3443 mvpp2_prs_vid_disable_filtering(port); 3444 3445 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3446 MVPP2_PRS_L2_UNI_CAST, enable); 3447 3448 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3449 MVPP2_PRS_L2_MULTI_CAST, enable); 3450 } 3451 3452 static void mvpp2_set_rx_mode(struct net_device *dev) 3453 { 3454 struct mvpp2_port *port = netdev_priv(dev); 3455 3456 /* Clear the whole UC and MC list */ 3457 
3457 	mvpp2_prs_mac_del_all(port);
3458 
3459 	if (dev->flags & IFF_PROMISC) {
3460 		mvpp2_set_rx_promisc(port, true);
3461 		return;
3462 	}
3463 
3464 	mvpp2_set_rx_promisc(port, false);
3465 
3466 	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
3467 	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
3468 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
3469 					  MVPP2_PRS_L2_UNI_CAST, true);
3470 
3471 	if (dev->flags & IFF_ALLMULTI) {
3472 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
3473 					  MVPP2_PRS_L2_MULTI_CAST, true);
3474 		return;
3475 	}
3476 
3477 	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
3478 	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
3479 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
3480 					  MVPP2_PRS_L2_MULTI_CAST, true);
3481 }
3482 
3483 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
3484 {
3485 	const struct sockaddr *addr = p;
3486 	int err;
3487 
3488 	if (!is_valid_ether_addr(addr->sa_data))
3489 		return -EADDRNOTAVAIL;
3490 
3491 	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
3492 	if (err) {
3493 		/* Reconfigure parser to accept the original MAC address */
3494 		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
3495 		netdev_err(dev, "failed to change MAC address\n");
3496 	}
3497 	return err;
3498 }
3499 
3500 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
3501 {
3502 	struct mvpp2_port *port = netdev_priv(dev);
3503 	int err;
3504 
3505 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
3506 		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
3507 			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
3508 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
3509 	}
3510 
3511 	if (!netif_running(dev)) {
3512 		err = mvpp2_bm_update_mtu(dev, mtu);
3513 		if (!err) {
3514 			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3515 			return 0;
3516 		}
3517 
3518 		/* Reconfigure BM to the original MTU */
3519 		err = mvpp2_bm_update_mtu(dev, dev->mtu);
3520 		if (err)
3521 			goto log_error;
3522 	}
3523 
3524 	mvpp2_stop_dev(port);
3525 
3526 	err = mvpp2_bm_update_mtu(dev, mtu);
3527 	if (!err) {
3528 		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3529 		goto out_start;
3530 	}
3531 
3532 	/* Reconfigure BM to the original MTU */
3533 	err = mvpp2_bm_update_mtu(dev, dev->mtu);
3534 	if (err)
3535 		goto log_error;
3536 
3537 out_start:
3538 	mvpp2_start_dev(port);
3539 	mvpp2_egress_enable(port);
3540 	mvpp2_ingress_enable(port);
3541 
3542 	return 0;
3543 log_error:
3544 	netdev_err(dev, "failed to change MTU\n");
3545 	return err;
3546 }
3547 
3548 static void
3549 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
3550 {
3551 	struct mvpp2_port *port = netdev_priv(dev);
3552 	unsigned int start;
3553 	int cpu;
3554 
3555 	for_each_possible_cpu(cpu) {
3556 		struct mvpp2_pcpu_stats *cpu_stats;
3557 		u64 rx_packets;
3558 		u64 rx_bytes;
3559 		u64 tx_packets;
3560 		u64 tx_bytes;
3561 
3562 		cpu_stats = per_cpu_ptr(port->stats, cpu);
3563 		do {
3564 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3565 			rx_packets = cpu_stats->rx_packets;
3566 			rx_bytes = cpu_stats->rx_bytes;
3567 			tx_packets = cpu_stats->tx_packets;
3568 			tx_bytes = cpu_stats->tx_bytes;
3569 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3570 
3571 		stats->rx_packets += rx_packets;
3572 		stats->rx_bytes += rx_bytes;
3573 		stats->tx_packets += tx_packets;
3574 		stats->tx_bytes += tx_bytes;
3575 	}
3576 
3577 	stats->rx_errors = dev->stats.rx_errors;
3578 	stats->rx_dropped = dev->stats.rx_dropped;
3579 	stats->tx_dropped = dev->stats.tx_dropped;
3580 }
3581 
3582 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3583 {
3584 	struct mvpp2_port *port = netdev_priv(dev);
3585 
3586 	if (!port->phylink)
3587 		return -ENOTSUPP;
3588 
3589 	return phylink_mii_ioctl(port->phylink, ifr, cmd);
3590 }
3591 
3592 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3593 {
3594 	struct mvpp2_port *port = netdev_priv(dev);
3595 	int ret;
3596 
3597 	ret = mvpp2_prs_vid_entry_add(port, vid);
3598 	if (ret)
3599 		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
3600 			   MVPP2_PRS_VLAN_FILT_MAX - 1);
3601 	return ret;
3602 }
3603 
3604 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3605 {
3606 	struct mvpp2_port *port = netdev_priv(dev);
3607 
3608 	mvpp2_prs_vid_entry_remove(port, vid);
3609 	return 0;
3610 }
3611 
3612 static int mvpp2_set_features(struct net_device *dev,
3613 			      netdev_features_t features)
3614 {
3615 	netdev_features_t changed = dev->features ^ features;
3616 	struct mvpp2_port *port = netdev_priv(dev);
3617 
3618 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3619 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
3620 			mvpp2_prs_vid_enable_filtering(port);
3621 		} else {
3622 			/* Invalidate all registered VID filters for this
3623 			 * port
3624 			 */
3625 			mvpp2_prs_vid_remove_all(port);
3626 
3627 			mvpp2_prs_vid_disable_filtering(port);
3628 		}
3629 	}
3630 
3631 	if (changed & NETIF_F_RXHASH) {
3632 		if (features & NETIF_F_RXHASH)
3633 			mvpp22_rss_enable(port);
3634 		else
3635 			mvpp22_rss_disable(port);
3636 	}
3637 
3638 	return 0;
3639 }
3640 
3641 /* Ethtool methods */
3642 
3643 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
3644 {
3645 	struct mvpp2_port *port = netdev_priv(dev);
3646 
3647 	if (!port->phylink)
3648 		return -ENOTSUPP;
3649 
3650 	return phylink_ethtool_nway_reset(port->phylink);
3651 }
3652 
3653 /* Set interrupt coalescing for ethtool */
3654 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
3655 				      struct ethtool_coalesce *c)
3656 {
3657 	struct mvpp2_port *port = netdev_priv(dev);
3658 	int queue;
3659 
3660 	for (queue = 0; queue < port->nrxqs; queue++) {
3661 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3662 
3663 		rxq->time_coal = c->rx_coalesce_usecs;
3664 		rxq->pkts_coal = c->rx_max_coalesced_frames;
3665 		mvpp2_rx_pkts_coal_set(port, rxq);
3666 		mvpp2_rx_time_coal_set(port, rxq);
3667 	}
3668 
3669 	if (port->has_tx_irqs) {
3670 		port->tx_time_coal = c->tx_coalesce_usecs;
3671 		mvpp2_tx_time_coal_set(port);
3672 	}
3673 
3674 	for (queue = 0; queue < port->ntxqs; queue++) {
3675 		struct mvpp2_tx_queue *txq = port->txqs[queue];
3676 
3677 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
3678 
3679 		if (port->has_tx_irqs)
3680 			mvpp2_tx_pkts_coal_set(port, txq);
3681 	}
3682 
3683 	return 0;
3684 }
3685 
3686 /* Get interrupt coalescing for ethtool */
3687 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
3688 				      struct ethtool_coalesce *c)
3689 {
3690 	struct mvpp2_port *port = netdev_priv(dev);
3691 
3692 	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
3693 	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
3694 	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
3695 	c->tx_coalesce_usecs = port->tx_time_coal;
3696 	return 0;
3697 }
3698 
3699 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
3700 				      struct ethtool_drvinfo *drvinfo)
3701 {
3702 	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
3703 		sizeof(drvinfo->driver));
3704 	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
3705 		sizeof(drvinfo->version));
3706 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3707 		sizeof(drvinfo->bus_info));
3708 }
3709 
3710 static void
mvpp2_ethtool_get_ringparam(struct net_device *dev, 3711 struct ethtool_ringparam *ring) 3712 { 3713 struct mvpp2_port *port = netdev_priv(dev); 3714 3715 ring->rx_max_pending = MVPP2_MAX_RXD_MAX; 3716 ring->tx_max_pending = MVPP2_MAX_TXD_MAX; 3717 ring->rx_pending = port->rx_ring_size; 3718 ring->tx_pending = port->tx_ring_size; 3719 } 3720 3721 static int mvpp2_ethtool_set_ringparam(struct net_device *dev, 3722 struct ethtool_ringparam *ring) 3723 { 3724 struct mvpp2_port *port = netdev_priv(dev); 3725 u16 prev_rx_ring_size = port->rx_ring_size; 3726 u16 prev_tx_ring_size = port->tx_ring_size; 3727 int err; 3728 3729 err = mvpp2_check_ringparam_valid(dev, ring); 3730 if (err) 3731 return err; 3732 3733 if (!netif_running(dev)) { 3734 port->rx_ring_size = ring->rx_pending; 3735 port->tx_ring_size = ring->tx_pending; 3736 return 0; 3737 } 3738 3739 /* The interface is running, so we have to force a 3740 * reallocation of the queues 3741 */ 3742 mvpp2_stop_dev(port); 3743 mvpp2_cleanup_rxqs(port); 3744 mvpp2_cleanup_txqs(port); 3745 3746 port->rx_ring_size = ring->rx_pending; 3747 port->tx_ring_size = ring->tx_pending; 3748 3749 err = mvpp2_setup_rxqs(port); 3750 if (err) { 3751 /* Reallocate Rx queues with the original ring size */ 3752 port->rx_ring_size = prev_rx_ring_size; 3753 ring->rx_pending = prev_rx_ring_size; 3754 err = mvpp2_setup_rxqs(port); 3755 if (err) 3756 goto err_out; 3757 } 3758 err = mvpp2_setup_txqs(port); 3759 if (err) { 3760 /* Reallocate Tx queues with the original ring size */ 3761 port->tx_ring_size = prev_tx_ring_size; 3762 ring->tx_pending = prev_tx_ring_size; 3763 err = mvpp2_setup_txqs(port); 3764 if (err) 3765 goto err_clean_rxqs; 3766 } 3767 3768 mvpp2_start_dev(port); 3769 mvpp2_egress_enable(port); 3770 mvpp2_ingress_enable(port); 3771 3772 return 0; 3773 3774 err_clean_rxqs: 3775 mvpp2_cleanup_rxqs(port); 3776 err_out: 3777 netdev_err(dev, "failed to change ring parameters"); 3778 return err; 3779 } 3780 3781 static void mvpp2_ethtool_get_pause_param(struct net_device *dev, 3782 struct ethtool_pauseparam *pause) 3783 { 3784 struct mvpp2_port *port = netdev_priv(dev); 3785 3786 if (!port->phylink) 3787 return; 3788 3789 phylink_ethtool_get_pauseparam(port->phylink, pause); 3790 } 3791 3792 static int mvpp2_ethtool_set_pause_param(struct net_device *dev, 3793 struct ethtool_pauseparam *pause) 3794 { 3795 struct mvpp2_port *port = netdev_priv(dev); 3796 3797 if (!port->phylink) 3798 return -ENOTSUPP; 3799 3800 return phylink_ethtool_set_pauseparam(port->phylink, pause); 3801 } 3802 3803 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, 3804 struct ethtool_link_ksettings *cmd) 3805 { 3806 struct mvpp2_port *port = netdev_priv(dev); 3807 3808 if (!port->phylink) 3809 return -ENOTSUPP; 3810 3811 return phylink_ethtool_ksettings_get(port->phylink, cmd); 3812 } 3813 3814 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, 3815 const struct ethtool_link_ksettings *cmd) 3816 { 3817 struct mvpp2_port *port = netdev_priv(dev); 3818 3819 if (!port->phylink) 3820 return -ENOTSUPP; 3821 3822 return phylink_ethtool_ksettings_set(port->phylink, cmd); 3823 } 3824 3825 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, 3826 struct ethtool_rxnfc *info, u32 *rules) 3827 { 3828 struct mvpp2_port *port = netdev_priv(dev); 3829 int ret = 0; 3830 3831 if (!mvpp22_rss_is_supported()) 3832 return -EOPNOTSUPP; 3833 3834 switch (info->cmd) { 3835 case ETHTOOL_GRXFH: 3836 ret = mvpp2_ethtool_rxfh_get(port, info); 3837 break; 3838 case 
static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}

static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (indir)
		memcpy(indir, port->indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return 0;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir) {
		memcpy(port->indir, indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
		mvpp22_rss_fill_table(port, port->id);
	}

	return 0;
}
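/* Illustrative usage (eth0 assumed): the RSS handlers above implement
 * the standard ethtool interface. The hash function is fixed to CRC32
 * and there is no configurable key, so only the indirection table can
 * be changed, e.g.
 *
 *   ethtool -x eth0
 *   ethtool -X eth0 equal 4
 */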
/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
};

/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}

static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
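/* For reference (illustrative, not a normative binding description):
 * with the multi-queue layout above, a PPv2.2 port node is expected to
 * name one Tx IRQ per CPU plus an optional shared Rx IRQ, along the
 * lines of
 *
 *   interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
 *                     "tx-cpu3", "rx-shared";
 *
 * mvpp2_port_has_tx_irqs() further down probes for exactly these names
 * to keep compatibility with older device trees.
 */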
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
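/* Note on the PPv2.2 accesses above (a summary of the code, not of the
 * datasheet): MVPP22_ISR_RXQ_GROUP_INDEX_REG selects a (port, sub-group)
 * pair, and the following write to MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG
 * then describes the first Rx queue and queue count handled by that
 * sub-group/SW thread. The two writes form an index/data pair and must
 * be kept together.
 */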
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
	    port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_rss_port_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}

/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we still need to support the old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}

static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}

static void mvpp2_phylink_validate(struct net_device *dev,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 10000baseKR_Full);
		/* Fall-through */
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 10000baseT_Full);
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
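/* Hedged note on the validate() convention used above: phylink hands in
 * the full set of link modes it could advertise, and the MAC is expected
 * to AND it down to what the given interface supports. The deliberate
 * switch fall-throughs build the mask up from the 10G-only modes to the
 * modes every GMAC interface mode supports.
 */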
static void mvpp22_xlg_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}

static void mvpp2_gmac_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}

static int mvpp2_phylink_mac_link_state(struct net_device *dev,
					struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);

		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_link_state(port, state);
			return 1;
		}
	}

	mvpp2_gmac_link_state(port, state);
	return 1;
}

static void mvpp2_mac_an_restart(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	/* The RESTART_AN bit is cleared by the h/w after restarting the AN
	 * process.
	 */
	val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 ctrl0, ctrl4;

	ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;

	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
}
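/* A rough summary of mvpp2_gmac_config() below: the GMAC is forced
 * link-down and put in reset, all AN-related bits are cleared, and the
 * control/AN registers are then rebuilt for the new interface mode
 * (fixed 1000BASE-X/2500BASE-X, in-band SGMII, or RGMII with the
 * in-band bypass) before being written back with the reset bit cleared.
 */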
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 an, ctrl0, ctrl2, ctrl4;

	an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	/* Force link down */
	an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	an |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);

	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_FORCE_LINK_DOWN);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);

	if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	} else if (!phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
	}

	if (state->duplex)
		an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG;
		ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;

		ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			   MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;

		if (state->speed == SPEED_1000)
			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			an |= MVPP2_GMAC_CONFIG_MII_SPEED;

		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
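/* Sequencing note for mac_config() below (a summary of the code): the
 * port is quiesced first (queues stopped, carrier dropped for in-band
 * modes, port disabled); on PPv2.2 an interface change additionally
 * powers the comphy lane off so mvpp22_mode_reconfigure() can retrain
 * the serdes for the new mode.
 */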
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Check for invalid configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	netif_tx_stop_all_queues(port->dev);
	if (!port->has_phy)
		netif_carrier_off(port->dev);

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 &&
	    port->phy_interface != state->interface) {
		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII ||
		 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
		 state->interface == PHY_INTERFACE_MODE_2500BASEX)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	/* If the port was already up, make sure it's still in the same state */
	if (state->link || !port->has_phy) {
		mvpp2_port_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		if (!port->has_phy)
			netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	}
}

static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		if (phy_interface_mode_is_rgmii(interface))
			val |= MVPP2_GMAC_FORCE_LINK_PASS;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}
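/* Hedged note on the two link handlers (mvpp2_mac_link_up() above and
 * mvpp2_mac_link_down() below): the FORCE_LINK_PASS/FORCE_LINK_DOWN
 * bits are only touched when the MAC is not doing in-band
 * autonegotiation (and not in 10GKR mode); with in-band AN the link
 * state is expected to follow the PCS instead.
 */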
static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
				phy_interface_t interface)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	/* When using link interrupts to notify phylink of a MAC state change,
	 * we do not want the port to be disabled (we want to receive further
	 * interrupts, to be notified when the port will have a link later).
	 */
	if (!port->has_phy)
		return;

	mvpp2_port_disable(port);
}

static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_link_state = mvpp2_phylink_mac_link_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
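/* A rough map of mvpp2_port_probe() below: allocate the netdev, parse
 * the per-port firmware description (IRQs, MAC address, comphy,
 * port-id/gop-port-id), map the per-port register windows, initialize
 * the queues and BM pools through mvpp2_port_init(), then attach
 * phylink (DT only) and register the netdev.
 */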
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	if (port_node) {
		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	} else {
		has_tx_irqs = true;
		queue_mode = MVPP2_QDIST_MULTI_MODE;
	}

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported())
		dev->hw_features |= NETIF_F_RXHASH;

	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20, rounded down to a multiple of 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;

	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		phylink = phylink_create(dev, port_fwnode, phy_mode,
					 &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
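/* Arithmetic note on mvpp22_rx_fifo_init() below (derived from the
 * constants used there): the per-port data FIFO allocations add up to
 * 32 + 8 + 4 + 4 = 48kB, mirroring the 10G/2.5G/1G/1G port speeds.
 */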
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2, and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size
 * to 3kB.
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
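/* Hedged reading of mvpp2_axi_init() above: DMA reads and writes are
 * tagged as cacheable in the outer-shareable domain so they can snoop
 * the CPU caches, while the "normal" (non-snooping) code defaults to
 * non-cacheable, system-domain attributes.
 */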
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* Multi-queue mode isn't supported on PPv2.1, fall back to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* If the MDIO memory region is declared in the ACPI
			 * tables, it can already appear as "in use" to the
			 * OS. Since it is overlapped by the second region of
			 * the network controller, make sure it is released
			 * before requesting it again. The mvpp2 driver takes
			 * care to avoid concurrent access to this memory
			 * region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}
	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	mvpp2_setup_bm_pool();

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}
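/* Back-of-the-envelope arithmetic for the "less than 30 seconds" figure
 * below: at 10Gb/s, minimum-sized 64B frames (84B on the wire) arrive
 * at about 14.88M frames/s, i.e. ~952MB/s of frame data, so a 32-bit
 * octet counter wraps after roughly 2^32 / 952e6 = ~4.5 seconds.
 */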
	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	flush_workqueue(priv->stats_queue);
	destroy_workqueue(priv->stats_queue);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");