// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG   (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG     (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG     (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
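/* Illustration only, not used by the driver: a hedged sketch of the access
 * rule described above, using hypothetical locals (rxq_id, desc_addr). A
 * global register tied to a per-thread one must be accessed through the
 * same thread window, e.g. selecting an RX queue before reading its
 * descriptor base address:
 *
 *	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
 *
 *	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq_id);
 *	desc_addr = mvpp2_thread_read(priv, thread, MVPP2_RXQ_DESC_ADDR_REG);
 *	put_cpu();
 */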
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
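/* Worked example (illustration only): following the two comments above, a
 * pool of 2048 buffer pointers (a multiple of 16) makes mvpp2_bm_pool_create()
 * request 2048 * 2 * sizeof(u32) = 16 KiB of coherent memory on PPv2.1 and
 * 2048 * 2 * sizeof(u64) = 32 KiB on PPv2.2.
 */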
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask all BM interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that the BM pool is being used as a specific type and
 * return the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
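/* Illustration only: with the mapping above, a port whose MTU was raised so
 * that pkt_size exceeds MVPP2_BM_LONG_PKT_SIZE (frames larger than 1518B,
 * e.g. a 9000-byte MTU) gets the SW jumbo pool as its HW long pool and the
 * SW long pool as its HW short pool; at a standard MTU it uses the SW long
 * and SW short pools directly.
 */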
/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *p;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i].port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i,
					port->priv->bm_pools[i + port->nrxqs].id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}
static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
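/* Illustration only: as the comments above state, these two handlers are
 * meant to be run on every CPU via on_each_cpu(), e.g.:
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 */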
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
	      ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

/* Because software statistics and hardware statistics are, by design,
 * incremented at different moments in the chain of packet processing, it is
 * very likely that incoming packets are dropped after being counted by
 * hardware but before reaching software statistics (most probably multicast
 * packets), and in the opposite direction, during transmission, FCS bytes are
 * added in between, and TSO skbs are split with header bytes added. Hence,
 * statistics gathered from userspace with ifconfig (software) and ethtool
 * (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
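/* Worked example (illustration only): with the tables above (27 MIB, 2
 * per-port, 9 per-TXQ and 4 per-RXQ counters), a port with 8 TX queues and
 * 4 RX queues would expose MVPP2_N_ETHTOOL_STATS(8, 4) =
 * 27 + 2 + 9 * 8 + 4 * 4 = 117 u64 counters.
 */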
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}
}

static void mvpp2_read_stats(struct mvpp2_port *port)
{
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, i),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + i,
						      mvpp2_ethtool_rxq_regs[i].offset);
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}

static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* By default, mask all interrupts to all present CPUs */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
1860 */ 1861 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 1862 1863 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 1864 } 1865 1866 /* Get pointer to next RX descriptor to be processed by SW */ 1867 static inline struct mvpp2_rx_desc * 1868 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 1869 { 1870 int rx_desc = rxq->next_desc_to_proc; 1871 1872 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 1873 prefetch(rxq->descs + rxq->next_desc_to_proc); 1874 return rxq->descs + rx_desc; 1875 } 1876 1877 /* Set rx queue offset */ 1878 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 1879 int prxq, int offset) 1880 { 1881 u32 val; 1882 1883 /* Convert offset from bytes to units of 32 bytes */ 1884 offset = offset >> 5; 1885 1886 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 1887 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 1888 1889 /* Offset is in */ 1890 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 1891 MVPP2_RXQ_PACKET_OFFSET_MASK); 1892 1893 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 1894 } 1895 1896 /* Tx descriptors helper methods */ 1897 1898 /* Get pointer to next Tx descriptor to be processed (send) by HW */ 1899 static struct mvpp2_tx_desc * 1900 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 1901 { 1902 int tx_desc = txq->next_desc_to_proc; 1903 1904 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 1905 return txq->descs + tx_desc; 1906 } 1907 1908 /* Update HW with number of aggregated Tx descriptors to be sent 1909 * 1910 * Called only from mvpp2_tx(), so migration is disabled, using 1911 * smp_processor_id() is OK. 1912 */ 1913 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 1914 { 1915 /* aggregated access - relevant TXQ number is written in TX desc */ 1916 mvpp2_thread_write(port->priv, 1917 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 1918 MVPP2_AGGR_TXQ_UPDATE_REG, pending); 1919 } 1920 1921 /* Check if there are enough free descriptors in aggregated txq. 1922 * If not, update the number of occupied descriptors and repeat the check. 1923 * 1924 * Called only from mvpp2_tx(), so migration is disabled, using 1925 * smp_processor_id() is OK. 1926 */ 1927 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, 1928 struct mvpp2_tx_queue *aggr_txq, int num) 1929 { 1930 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { 1931 /* Update number of occupied aggregated Tx descriptors */ 1932 unsigned int thread = 1933 mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 1934 u32 val = mvpp2_read_relaxed(port->priv, 1935 MVPP2_AGGR_TXQ_STATUS_REG(thread)); 1936 1937 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; 1938 1939 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) 1940 return -ENOMEM; 1941 } 1942 return 0; 1943 } 1944 1945 /* Reserved Tx descriptors allocation request 1946 * 1947 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called 1948 * only by mvpp2_tx(), so migration is disabled, using 1949 * smp_processor_id() is OK. 
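 *
 * The request is a single write of (txq id | number of descriptors) to
 * MVPP2_TXQ_RSVD_REQ_REG through the calling thread's register window;
 * the hardware reports how many descriptors were actually reserved in
 * MVPP2_TXQ_RSVD_RSLT_REG.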
1950 */ 1951 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, 1952 struct mvpp2_tx_queue *txq, int num) 1953 { 1954 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 1955 struct mvpp2 *priv = port->priv; 1956 u32 val; 1957 1958 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 1959 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); 1960 1961 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); 1962 1963 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 1964 } 1965 1966 /* Check if there are enough reserved descriptors for transmission. 1967 * If not, request chunk of reserved descriptors and check again. 1968 */ 1969 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, 1970 struct mvpp2_tx_queue *txq, 1971 struct mvpp2_txq_pcpu *txq_pcpu, 1972 int num) 1973 { 1974 int req, desc_count; 1975 unsigned int thread; 1976 1977 if (txq_pcpu->reserved_num >= num) 1978 return 0; 1979 1980 /* Not enough descriptors reserved! Update the reserved descriptor 1981 * count and check again. 1982 */ 1983 1984 desc_count = 0; 1985 /* Compute total of used descriptors */ 1986 for (thread = 0; thread < port->priv->nthreads; thread++) { 1987 struct mvpp2_txq_pcpu *txq_pcpu_aux; 1988 1989 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); 1990 desc_count += txq_pcpu_aux->count; 1991 desc_count += txq_pcpu_aux->reserved_num; 1992 } 1993 1994 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); 1995 desc_count += req; 1996 1997 if (desc_count > 1998 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) 1999 return -ENOMEM; 2000 2001 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); 2002 2003 /* OK, the descriptor could have been updated: check again. */ 2004 if (txq_pcpu->reserved_num < num) 2005 return -ENOMEM; 2006 return 0; 2007 } 2008 2009 /* Release the last allocated Tx descriptor. Useful to handle DMA 2010 * mapping failures in the Tx path. 2011 */ 2012 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) 2013 { 2014 if (txq->next_desc_to_proc == 0) 2015 txq->next_desc_to_proc = txq->last_desc - 1; 2016 else 2017 txq->next_desc_to_proc--; 2018 } 2019 2020 /* Set Tx descriptors fields relevant for CSUM calculation */ 2021 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, 2022 int ip_hdr_len, int l4_proto) 2023 { 2024 u32 command; 2025 2026 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 2027 * G_L4_chk, L4_type required only for checksum calculation 2028 */ 2029 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); 2030 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); 2031 command |= MVPP2_TXD_IP_CSUM_DISABLE; 2032 2033 if (l3_proto == htons(ETH_P_IP)) { 2034 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 2035 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 2036 } else { 2037 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 2038 } 2039 2040 if (l4_proto == IPPROTO_TCP) { 2041 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 2042 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2043 } else if (l4_proto == IPPROTO_UDP) { 2044 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 2045 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2046 } else { 2047 command |= MVPP2_TXD_L4_CSUM_NOT; 2048 } 2049 2050 return command; 2051 } 2052 2053 /* Get number of sent descriptors and decrement counter. 2054 * The number of sent descriptors is returned. 
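 * Reading MVPP2_TXQ_SENT_REG clears the hardware counter, so each call
 * only reports descriptors completed since the previous read.
 *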
2055 * Per-thread access 2056 * 2057 * Called only from mvpp2_txq_done(), called from mvpp2_tx() 2058 * (migration disabled) and from the TX completion tasklet (migration 2059 * disabled) so using smp_processor_id() is OK. 2060 */ 2061 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 2062 struct mvpp2_tx_queue *txq) 2063 { 2064 u32 val; 2065 2066 /* Reading status reg resets transmitted descriptor counter */ 2067 val = mvpp2_thread_read_relaxed(port->priv, 2068 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2069 MVPP2_TXQ_SENT_REG(txq->id)); 2070 2071 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 2072 MVPP2_TRANSMITTED_COUNT_OFFSET; 2073 } 2074 2075 /* Called through on_each_cpu(), so runs on all CPUs, with migration 2076 * disabled, therefore using smp_processor_id() is OK. 2077 */ 2078 static void mvpp2_txq_sent_counter_clear(void *arg) 2079 { 2080 struct mvpp2_port *port = arg; 2081 int queue; 2082 2083 /* If the thread isn't used, don't do anything */ 2084 if (smp_processor_id() > port->priv->nthreads) 2085 return; 2086 2087 for (queue = 0; queue < port->ntxqs; queue++) { 2088 int id = port->txqs[queue]->id; 2089 2090 mvpp2_thread_read(port->priv, 2091 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2092 MVPP2_TXQ_SENT_REG(id)); 2093 } 2094 } 2095 2096 /* Set max sizes for Tx queues */ 2097 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 2098 { 2099 u32 val, size, mtu; 2100 int txq, tx_port_num; 2101 2102 mtu = port->pkt_size * 8; 2103 if (mtu > MVPP2_TXP_MTU_MAX) 2104 mtu = MVPP2_TXP_MTU_MAX; 2105 2106 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 2107 mtu = 3 * mtu; 2108 2109 /* Indirect access to registers */ 2110 tx_port_num = mvpp2_egress_port(port); 2111 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2112 2113 /* Set MTU */ 2114 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 2115 val &= ~MVPP2_TXP_MTU_MAX; 2116 val |= mtu; 2117 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 2118 2119 /* TXP token size and all TXQs token size must be larger that MTU */ 2120 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 2121 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 2122 if (size < mtu) { 2123 size = mtu; 2124 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 2125 val |= size; 2126 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2127 } 2128 2129 for (txq = 0; txq < port->ntxqs; txq++) { 2130 val = mvpp2_read(port->priv, 2131 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 2132 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 2133 2134 if (size < mtu) { 2135 size = mtu; 2136 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 2137 val |= size; 2138 mvpp2_write(port->priv, 2139 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 2140 val); 2141 } 2142 } 2143 } 2144 2145 /* Set the number of packets that will be received before Rx interrupt 2146 * will be generated by HW. 2147 */ 2148 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 2149 struct mvpp2_rx_queue *rxq) 2150 { 2151 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2152 2153 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 2154 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 2155 2156 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2157 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, 2158 rxq->pkts_coal); 2159 2160 put_cpu(); 2161 } 2162 2163 /* For some reason in the LSP this is done on each CPU. Why ? 
*/ 2164 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, 2165 struct mvpp2_tx_queue *txq) 2166 { 2167 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2168 u32 val; 2169 2170 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) 2171 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; 2172 2173 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); 2174 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2175 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); 2176 2177 put_cpu(); 2178 } 2179 2180 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 2181 { 2182 u64 tmp = (u64)clk_hz * usec; 2183 2184 do_div(tmp, USEC_PER_SEC); 2185 2186 return tmp > U32_MAX ? U32_MAX : tmp; 2187 } 2188 2189 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) 2190 { 2191 u64 tmp = (u64)cycles * USEC_PER_SEC; 2192 2193 do_div(tmp, clk_hz); 2194 2195 return tmp > U32_MAX ? U32_MAX : tmp; 2196 } 2197 2198 /* Set the time delay in usec before Rx interrupt */ 2199 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 2200 struct mvpp2_rx_queue *rxq) 2201 { 2202 unsigned long freq = port->priv->tclk; 2203 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2204 2205 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 2206 rxq->time_coal = 2207 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 2208 2209 /* re-evaluate to get actual register value */ 2210 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2211 } 2212 2213 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 2214 } 2215 2216 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 2217 { 2218 unsigned long freq = port->priv->tclk; 2219 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2220 2221 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 2222 port->tx_time_coal = 2223 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 2224 2225 /* re-evaluate to get actual register value */ 2226 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2227 } 2228 2229 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 2230 } 2231 2232 /* Free Tx queue skbuffs */ 2233 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 2234 struct mvpp2_tx_queue *txq, 2235 struct mvpp2_txq_pcpu *txq_pcpu, int num) 2236 { 2237 int i; 2238 2239 for (i = 0; i < num; i++) { 2240 struct mvpp2_txq_pcpu_buf *tx_buf = 2241 txq_pcpu->buffs + txq_pcpu->txq_get_index; 2242 2243 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) 2244 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 2245 tx_buf->size, DMA_TO_DEVICE); 2246 if (tx_buf->skb) 2247 dev_kfree_skb_any(tx_buf->skb); 2248 2249 mvpp2_txq_inc_get(txq_pcpu); 2250 } 2251 } 2252 2253 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 2254 u32 cause) 2255 { 2256 int queue = fls(cause) - 1; 2257 2258 return port->rxqs[queue]; 2259 } 2260 2261 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 2262 u32 cause) 2263 { 2264 int queue = fls(cause) - 1; 2265 2266 return port->txqs[queue]; 2267 } 2268 2269 /* Handle end of transmission */ 2270 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 2271 struct mvpp2_txq_pcpu *txq_pcpu) 2272 { 2273 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 2274 int tx_done; 2275 2276 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) 2277 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 2278 2279 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 2280 if 
(!tx_done) 2281 return; 2282 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 2283 2284 txq_pcpu->count -= tx_done; 2285 2286 if (netif_tx_queue_stopped(nq)) 2287 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 2288 netif_tx_wake_queue(nq); 2289 } 2290 2291 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 2292 unsigned int thread) 2293 { 2294 struct mvpp2_tx_queue *txq; 2295 struct mvpp2_txq_pcpu *txq_pcpu; 2296 unsigned int tx_todo = 0; 2297 2298 while (cause) { 2299 txq = mvpp2_get_tx_queue(port, cause); 2300 if (!txq) 2301 break; 2302 2303 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2304 2305 if (txq_pcpu->count) { 2306 mvpp2_txq_done(port, txq, txq_pcpu); 2307 tx_todo += txq_pcpu->count; 2308 } 2309 2310 cause &= ~(1 << txq->log_id); 2311 } 2312 return tx_todo; 2313 } 2314 2315 /* Rx/Tx queue initialization/cleanup methods */ 2316 2317 /* Allocate and initialize descriptors for aggr TXQ */ 2318 static int mvpp2_aggr_txq_init(struct platform_device *pdev, 2319 struct mvpp2_tx_queue *aggr_txq, 2320 unsigned int thread, struct mvpp2 *priv) 2321 { 2322 u32 txq_dma; 2323 2324 /* Allocate memory for TX descriptors */ 2325 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2326 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2327 &aggr_txq->descs_dma, GFP_KERNEL); 2328 if (!aggr_txq->descs) 2329 return -ENOMEM; 2330 2331 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 2332 2333 /* Aggr TXQ no reset WA */ 2334 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 2335 MVPP2_AGGR_TXQ_INDEX_REG(thread)); 2336 2337 /* Set Tx descriptors queue starting address indirect 2338 * access 2339 */ 2340 if (priv->hw_version == MVPP21) 2341 txq_dma = aggr_txq->descs_dma; 2342 else 2343 txq_dma = aggr_txq->descs_dma >> 2344 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 2345 2346 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); 2347 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), 2348 MVPP2_AGGR_TXQ_SIZE); 2349 2350 return 0; 2351 } 2352 2353 /* Create a specified Rx queue */ 2354 static int mvpp2_rxq_init(struct mvpp2_port *port, 2355 struct mvpp2_rx_queue *rxq) 2356 2357 { 2358 unsigned int thread; 2359 u32 rxq_dma; 2360 2361 rxq->size = port->rx_ring_size; 2362 2363 /* Allocate memory for RX descriptors */ 2364 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 2365 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2366 &rxq->descs_dma, GFP_KERNEL); 2367 if (!rxq->descs) 2368 return -ENOMEM; 2369 2370 rxq->last_desc = rxq->size - 1; 2371 2372 /* Zero occupied and non-occupied counters - direct access */ 2373 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2374 2375 /* Set Rx descriptors queue starting address - indirect access */ 2376 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2377 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2378 if (port->priv->hw_version == MVPP21) 2379 rxq_dma = rxq->descs_dma; 2380 else 2381 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 2382 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 2383 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 2384 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); 2385 put_cpu(); 2386 2387 /* Set Offset */ 2388 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 2389 2390 /* Set coalescing pkts and time */ 2391 mvpp2_rx_pkts_coal_set(port, rxq); 2392 mvpp2_rx_time_coal_set(port, rxq); 2393 2394 /* Add number of descriptors ready for receiving packets */ 2395 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 2396 2397 
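	/* The RXQ is now fully initialized: the descriptor ring is programmed,
	 * coalescing is set and all rxq->size descriptors have been reported
	 * as available to the hardware by the status update above.
	 */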
return 0; 2398 } 2399 2400 /* Push packets received by the RXQ to BM pool */ 2401 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 2402 struct mvpp2_rx_queue *rxq) 2403 { 2404 int rx_received, i; 2405 2406 rx_received = mvpp2_rxq_received(port, rxq->id); 2407 if (!rx_received) 2408 return; 2409 2410 for (i = 0; i < rx_received; i++) { 2411 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 2412 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 2413 int pool; 2414 2415 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 2416 MVPP2_RXD_BM_POOL_ID_OFFS; 2417 2418 mvpp2_bm_pool_put(port, pool, 2419 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 2420 mvpp2_rxdesc_cookie_get(port, rx_desc)); 2421 } 2422 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 2423 } 2424 2425 /* Cleanup Rx queue */ 2426 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 2427 struct mvpp2_rx_queue *rxq) 2428 { 2429 unsigned int thread; 2430 2431 mvpp2_rxq_drop_pkts(port, rxq); 2432 2433 if (rxq->descs) 2434 dma_free_coherent(port->dev->dev.parent, 2435 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2436 rxq->descs, 2437 rxq->descs_dma); 2438 2439 rxq->descs = NULL; 2440 rxq->last_desc = 0; 2441 rxq->next_desc_to_proc = 0; 2442 rxq->descs_dma = 0; 2443 2444 /* Clear Rx descriptors queue starting address and size; 2445 * free descriptor number 2446 */ 2447 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2448 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2449 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2450 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); 2451 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); 2452 put_cpu(); 2453 } 2454 2455 /* Create and initialize a Tx queue */ 2456 static int mvpp2_txq_init(struct mvpp2_port *port, 2457 struct mvpp2_tx_queue *txq) 2458 { 2459 u32 val; 2460 unsigned int thread; 2461 int desc, desc_per_txq, tx_port_num; 2462 struct mvpp2_txq_pcpu *txq_pcpu; 2463 2464 txq->size = port->tx_ring_size; 2465 2466 /* Allocate memory for Tx descriptors */ 2467 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 2468 txq->size * MVPP2_DESC_ALIGNED_SIZE, 2469 &txq->descs_dma, GFP_KERNEL); 2470 if (!txq->descs) 2471 return -ENOMEM; 2472 2473 txq->last_desc = txq->size - 1; 2474 2475 /* Set Tx descriptors queue starting address - indirect access */ 2476 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2477 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2478 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 2479 txq->descs_dma); 2480 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 2481 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 2482 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); 2483 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, 2484 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 2485 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); 2486 val &= ~MVPP2_TXQ_PENDING_MASK; 2487 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); 2488 2489 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 2490 * for each existing TXQ. 
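 * As a worked example (assuming MVPP2_MAX_TXQ is 8): port 1, logical TXQ 2
 * gets a prefetch base of 1 * 8 * 16 + 2 * 16 = 160 descriptors into the
 * prefetch buffer.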
2491 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 2492 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS 2493 */ 2494 desc_per_txq = 16; 2495 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 2496 (txq->log_id * desc_per_txq); 2497 2498 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, 2499 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 2500 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 2501 put_cpu(); 2502 2503 /* WRR / EJP configuration - indirect access */ 2504 tx_port_num = mvpp2_egress_port(port); 2505 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2506 2507 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 2508 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 2509 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 2510 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 2511 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 2512 2513 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 2514 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 2515 val); 2516 2517 for (thread = 0; thread < port->priv->nthreads; thread++) { 2518 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2519 txq_pcpu->size = txq->size; 2520 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, 2521 sizeof(*txq_pcpu->buffs), 2522 GFP_KERNEL); 2523 if (!txq_pcpu->buffs) 2524 return -ENOMEM; 2525 2526 txq_pcpu->count = 0; 2527 txq_pcpu->reserved_num = 0; 2528 txq_pcpu->txq_put_index = 0; 2529 txq_pcpu->txq_get_index = 0; 2530 txq_pcpu->tso_headers = NULL; 2531 2532 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; 2533 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; 2534 2535 txq_pcpu->tso_headers = 2536 dma_alloc_coherent(port->dev->dev.parent, 2537 txq_pcpu->size * TSO_HEADER_SIZE, 2538 &txq_pcpu->tso_headers_dma, 2539 GFP_KERNEL); 2540 if (!txq_pcpu->tso_headers) 2541 return -ENOMEM; 2542 } 2543 2544 return 0; 2545 } 2546 2547 /* Free allocated TXQ resources */ 2548 static void mvpp2_txq_deinit(struct mvpp2_port *port, 2549 struct mvpp2_tx_queue *txq) 2550 { 2551 struct mvpp2_txq_pcpu *txq_pcpu; 2552 unsigned int thread; 2553 2554 for (thread = 0; thread < port->priv->nthreads; thread++) { 2555 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2556 kfree(txq_pcpu->buffs); 2557 2558 if (txq_pcpu->tso_headers) 2559 dma_free_coherent(port->dev->dev.parent, 2560 txq_pcpu->size * TSO_HEADER_SIZE, 2561 txq_pcpu->tso_headers, 2562 txq_pcpu->tso_headers_dma); 2563 2564 txq_pcpu->tso_headers = NULL; 2565 } 2566 2567 if (txq->descs) 2568 dma_free_coherent(port->dev->dev.parent, 2569 txq->size * MVPP2_DESC_ALIGNED_SIZE, 2570 txq->descs, txq->descs_dma); 2571 2572 txq->descs = NULL; 2573 txq->last_desc = 0; 2574 txq->next_desc_to_proc = 0; 2575 txq->descs_dma = 0; 2576 2577 /* Set minimum bandwidth for disabled TXQs */ 2578 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); 2579 2580 /* Set Tx descriptors queue starting address and size */ 2581 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2582 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2583 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); 2584 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); 2585 put_cpu(); 2586 } 2587 2588 /* Cleanup Tx ports */ 2589 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 2590 { 2591 struct mvpp2_txq_pcpu *txq_pcpu; 2592 int delay, pending; 2593 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2594 u32 val; 2595 2596 
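	/* Drain sequence: select the TXQ through this thread's register
	 * window, set the DRAIN_EN bit in the prefetch buffer register, then
	 * poll MVPP2_TXQ_PENDING_REG until no descriptors are left or the
	 * timeout below expires.
	 */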
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2597 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); 2598 val |= MVPP2_TXQ_DRAIN_EN_MASK; 2599 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 2600 2601 /* The napi queue has been stopped so wait for all packets 2602 * to be transmitted. 2603 */ 2604 delay = 0; 2605 do { 2606 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 2607 netdev_warn(port->dev, 2608 "port %d: cleaning queue %d timed out\n", 2609 port->id, txq->log_id); 2610 break; 2611 } 2612 mdelay(1); 2613 delay++; 2614 2615 pending = mvpp2_thread_read(port->priv, thread, 2616 MVPP2_TXQ_PENDING_REG); 2617 pending &= MVPP2_TXQ_PENDING_MASK; 2618 } while (pending); 2619 2620 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 2621 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 2622 put_cpu(); 2623 2624 for (thread = 0; thread < port->priv->nthreads; thread++) { 2625 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2626 2627 /* Release all packets */ 2628 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 2629 2630 /* Reset queue */ 2631 txq_pcpu->count = 0; 2632 txq_pcpu->txq_put_index = 0; 2633 txq_pcpu->txq_get_index = 0; 2634 } 2635 } 2636 2637 /* Cleanup all Tx queues */ 2638 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 2639 { 2640 struct mvpp2_tx_queue *txq; 2641 int queue; 2642 u32 val; 2643 2644 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 2645 2646 /* Reset Tx ports and delete Tx queues */ 2647 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 2648 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 2649 2650 for (queue = 0; queue < port->ntxqs; queue++) { 2651 txq = port->txqs[queue]; 2652 mvpp2_txq_clean(port, txq); 2653 mvpp2_txq_deinit(port, txq); 2654 } 2655 2656 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 2657 2658 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 2659 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 2660 } 2661 2662 /* Cleanup all Rx queues */ 2663 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 2664 { 2665 int queue; 2666 2667 for (queue = 0; queue < port->nrxqs; queue++) 2668 mvpp2_rxq_deinit(port, port->rxqs[queue]); 2669 } 2670 2671 /* Init all Rx queues for port */ 2672 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 2673 { 2674 int queue, err; 2675 2676 for (queue = 0; queue < port->nrxqs; queue++) { 2677 err = mvpp2_rxq_init(port, port->rxqs[queue]); 2678 if (err) 2679 goto err_cleanup; 2680 } 2681 return 0; 2682 2683 err_cleanup: 2684 mvpp2_cleanup_rxqs(port); 2685 return err; 2686 } 2687 2688 /* Init all tx queues for port */ 2689 static int mvpp2_setup_txqs(struct mvpp2_port *port) 2690 { 2691 struct mvpp2_tx_queue *txq; 2692 int queue, err, cpu; 2693 2694 for (queue = 0; queue < port->ntxqs; queue++) { 2695 txq = port->txqs[queue]; 2696 err = mvpp2_txq_init(port, txq); 2697 if (err) 2698 goto err_cleanup; 2699 2700 /* Assign this queue to a CPU */ 2701 cpu = queue % num_present_cpus(); 2702 netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); 2703 } 2704 2705 if (port->has_tx_irqs) { 2706 mvpp2_tx_time_coal_set(port); 2707 for (queue = 0; queue < port->ntxqs; queue++) { 2708 txq = port->txqs[queue]; 2709 mvpp2_tx_pkts_coal_set(port, txq); 2710 } 2711 } 2712 2713 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 2714 return 0; 2715 2716 err_cleanup: 2717 mvpp2_cleanup_txqs(port); 2718 return err; 2719 } 2720 2721 /* The callback for per-port interrupt */ 2722 static irqreturn_t mvpp2_isr(int irq, void *dev_id) 2723 { 2724 struct 
mvpp2_queue_vector *qv = dev_id; 2725 2726 mvpp2_qvec_interrupt_disable(qv); 2727 2728 napi_schedule(&qv->napi); 2729 2730 return IRQ_HANDLED; 2731 } 2732 2733 /* Per-port interrupt for link status changes */ 2734 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) 2735 { 2736 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; 2737 struct net_device *dev = port->dev; 2738 bool event = false, link = false; 2739 u32 val; 2740 2741 mvpp22_gop_mask_irq(port); 2742 2743 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { 2744 val = readl(port->base + MVPP22_XLG_INT_STAT); 2745 if (val & MVPP22_XLG_INT_STAT_LINK) { 2746 event = true; 2747 val = readl(port->base + MVPP22_XLG_STATUS); 2748 if (val & MVPP22_XLG_STATUS_LINK_UP) 2749 link = true; 2750 } 2751 } else if (phy_interface_mode_is_rgmii(port->phy_interface) || 2752 phy_interface_mode_is_8023z(port->phy_interface) || 2753 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 2754 val = readl(port->base + MVPP22_GMAC_INT_STAT); 2755 if (val & MVPP22_GMAC_INT_STAT_LINK) { 2756 event = true; 2757 val = readl(port->base + MVPP2_GMAC_STATUS0); 2758 if (val & MVPP2_GMAC_STATUS0_LINK_UP) 2759 link = true; 2760 } 2761 } 2762 2763 if (port->phylink) { 2764 phylink_mac_change(port->phylink, link); 2765 goto handled; 2766 } 2767 2768 if (!netif_running(dev) || !event) 2769 goto handled; 2770 2771 if (link) { 2772 mvpp2_interrupts_enable(port); 2773 2774 mvpp2_egress_enable(port); 2775 mvpp2_ingress_enable(port); 2776 netif_carrier_on(dev); 2777 netif_tx_wake_all_queues(dev); 2778 } else { 2779 netif_tx_stop_all_queues(dev); 2780 netif_carrier_off(dev); 2781 mvpp2_ingress_disable(port); 2782 mvpp2_egress_disable(port); 2783 2784 mvpp2_interrupts_disable(port); 2785 } 2786 2787 handled: 2788 mvpp22_gop_unmask_irq(port); 2789 return IRQ_HANDLED; 2790 } 2791 2792 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 2793 { 2794 struct net_device *dev; 2795 struct mvpp2_port *port; 2796 struct mvpp2_port_pcpu *port_pcpu; 2797 unsigned int tx_todo, cause; 2798 2799 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); 2800 dev = port_pcpu->dev; 2801 2802 if (!netif_running(dev)) 2803 return HRTIMER_NORESTART; 2804 2805 port_pcpu->timer_scheduled = false; 2806 port = netdev_priv(dev); 2807 2808 /* Process all the Tx queues */ 2809 cause = (1 << port->ntxqs) - 1; 2810 tx_todo = mvpp2_tx_done(port, cause, 2811 mvpp2_cpu_to_thread(port->priv, smp_processor_id())); 2812 2813 /* Set the timer in case not all the packets were processed */ 2814 if (tx_todo && !port_pcpu->timer_scheduled) { 2815 port_pcpu->timer_scheduled = true; 2816 hrtimer_forward_now(&port_pcpu->tx_done_timer, 2817 MVPP2_TXDONE_HRTIMER_PERIOD_NS); 2818 2819 return HRTIMER_RESTART; 2820 } 2821 return HRTIMER_NORESTART; 2822 } 2823 2824 /* Main RX/TX processing routines */ 2825 2826 /* Display more error info */ 2827 static void mvpp2_rx_error(struct mvpp2_port *port, 2828 struct mvpp2_rx_desc *rx_desc) 2829 { 2830 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 2831 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 2832 char *err_str = NULL; 2833 2834 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 2835 case MVPP2_RXD_ERR_CRC: 2836 err_str = "crc"; 2837 break; 2838 case MVPP2_RXD_ERR_OVERRUN: 2839 err_str = "overrun"; 2840 break; 2841 case MVPP2_RXD_ERR_RESOURCE: 2842 err_str = "resource"; 2843 break; 2844 } 2845 if (err_str && net_ratelimit()) 2846 netdev_err(port->dev, 2847 "bad rx status %08x (%s error), size=%zu\n", 2848 status, err_str, 
sz); 2849 } 2850 2851 /* Handle RX checksum offload */ 2852 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, 2853 struct sk_buff *skb) 2854 { 2855 if (((status & MVPP2_RXD_L3_IP4) && 2856 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 2857 (status & MVPP2_RXD_L3_IP6)) 2858 if (((status & MVPP2_RXD_L4_UDP) || 2859 (status & MVPP2_RXD_L4_TCP)) && 2860 (status & MVPP2_RXD_L4_CSUM_OK)) { 2861 skb->csum = 0; 2862 skb->ip_summed = CHECKSUM_UNNECESSARY; 2863 return; 2864 } 2865 2866 skb->ip_summed = CHECKSUM_NONE; 2867 } 2868 2869 /* Allocate a new skb and add it to BM pool */ 2870 static int mvpp2_rx_refill(struct mvpp2_port *port, 2871 struct mvpp2_bm_pool *bm_pool, int pool) 2872 { 2873 dma_addr_t dma_addr; 2874 phys_addr_t phys_addr; 2875 void *buf; 2876 2877 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, 2878 GFP_ATOMIC); 2879 if (!buf) 2880 return -ENOMEM; 2881 2882 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2883 2884 return 0; 2885 } 2886 2887 /* Handle tx checksum */ 2888 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) 2889 { 2890 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2891 int ip_hdr_len = 0; 2892 u8 l4_proto; 2893 __be16 l3_proto = vlan_get_protocol(skb); 2894 2895 if (l3_proto == htons(ETH_P_IP)) { 2896 struct iphdr *ip4h = ip_hdr(skb); 2897 2898 /* Calculate IPv4 checksum and L4 checksum */ 2899 ip_hdr_len = ip4h->ihl; 2900 l4_proto = ip4h->protocol; 2901 } else if (l3_proto == htons(ETH_P_IPV6)) { 2902 struct ipv6hdr *ip6h = ipv6_hdr(skb); 2903 2904 /* Read l4_protocol from one of IPv6 extra headers */ 2905 if (skb_network_header_len(skb) > 0) 2906 ip_hdr_len = (skb_network_header_len(skb) >> 2); 2907 l4_proto = ip6h->nexthdr; 2908 } else { 2909 return MVPP2_TXD_L4_CSUM_NOT; 2910 } 2911 2912 return mvpp2_txq_desc_csum(skb_network_offset(skb), 2913 l3_proto, ip_hdr_len, l4_proto); 2914 } 2915 2916 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 2917 } 2918 2919 /* Main rx processing */ 2920 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, 2921 int rx_todo, struct mvpp2_rx_queue *rxq) 2922 { 2923 struct net_device *dev = port->dev; 2924 int rx_received; 2925 int rx_done = 0; 2926 u32 rcvd_pkts = 0; 2927 u32 rcvd_bytes = 0; 2928 2929 /* Get number of received packets and clamp the to-do */ 2930 rx_received = mvpp2_rxq_received(port, rxq->id); 2931 if (rx_todo > rx_received) 2932 rx_todo = rx_received; 2933 2934 while (rx_done < rx_todo) { 2935 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 2936 struct mvpp2_bm_pool *bm_pool; 2937 struct sk_buff *skb; 2938 unsigned int frag_size; 2939 dma_addr_t dma_addr; 2940 phys_addr_t phys_addr; 2941 u32 rx_status; 2942 int pool, rx_bytes, err; 2943 void *data; 2944 2945 rx_done++; 2946 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 2947 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 2948 rx_bytes -= MVPP2_MH_SIZE; 2949 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 2950 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 2951 data = (void *)phys_to_virt(phys_addr); 2952 2953 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> 2954 MVPP2_RXD_BM_POOL_ID_OFFS; 2955 bm_pool = &port->priv->bm_pools[pool]; 2956 2957 /* In case of an error, release the requested buffer pointer 2958 * to the Buffer Manager. This request process is controlled 2959 * by the hardware, and the information about the buffer is 2960 * comprised by the RX descriptor. 
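 * (The err_drop_frame path below does exactly that: it hands the DMA
 * address and cookie read from the descriptor back to the BM pool through
 * mvpp2_bm_pool_put().)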
2961 */ 2962 if (rx_status & MVPP2_RXD_ERR_SUMMARY) 2963 goto err_drop_frame; 2964 2965 dma_sync_single_for_cpu(dev->dev.parent, dma_addr, 2966 rx_bytes + MVPP2_MH_SIZE, 2967 DMA_FROM_DEVICE); 2968 prefetch(data); 2969 2970 if (bm_pool->frag_size > PAGE_SIZE) 2971 frag_size = 0; 2972 else 2973 frag_size = bm_pool->frag_size; 2974 2975 skb = build_skb(data, frag_size); 2976 if (!skb) { 2977 netdev_warn(port->dev, "skb build failed\n"); 2978 goto err_drop_frame; 2979 } 2980 2981 err = mvpp2_rx_refill(port, bm_pool, pool); 2982 if (err) { 2983 netdev_err(port->dev, "failed to refill BM pools\n"); 2984 goto err_drop_frame; 2985 } 2986 2987 dma_unmap_single_attrs(dev->dev.parent, dma_addr, 2988 bm_pool->buf_size, DMA_FROM_DEVICE, 2989 DMA_ATTR_SKIP_CPU_SYNC); 2990 2991 rcvd_pkts++; 2992 rcvd_bytes += rx_bytes; 2993 2994 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); 2995 skb_put(skb, rx_bytes); 2996 skb->protocol = eth_type_trans(skb, dev); 2997 mvpp2_rx_csum(port, rx_status, skb); 2998 2999 napi_gro_receive(napi, skb); 3000 continue; 3001 3002 err_drop_frame: 3003 dev->stats.rx_errors++; 3004 mvpp2_rx_error(port, rx_desc); 3005 /* Return the buffer to the pool */ 3006 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3007 } 3008 3009 if (rcvd_pkts) { 3010 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 3011 3012 u64_stats_update_begin(&stats->syncp); 3013 stats->rx_packets += rcvd_pkts; 3014 stats->rx_bytes += rcvd_bytes; 3015 u64_stats_update_end(&stats->syncp); 3016 } 3017 3018 /* Update Rx queue management counters */ 3019 wmb(); 3020 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); 3021 3022 return rx_todo; 3023 } 3024 3025 static inline void 3026 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 3027 struct mvpp2_tx_desc *desc) 3028 { 3029 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3030 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3031 3032 dma_addr_t buf_dma_addr = 3033 mvpp2_txdesc_dma_addr_get(port, desc); 3034 size_t buf_sz = 3035 mvpp2_txdesc_size_get(port, desc); 3036 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 3037 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 3038 buf_sz, DMA_TO_DEVICE); 3039 mvpp2_txq_desc_put(txq); 3040 } 3041 3042 /* Handle tx fragmentation processing */ 3043 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, 3044 struct mvpp2_tx_queue *aggr_txq, 3045 struct mvpp2_tx_queue *txq) 3046 { 3047 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3048 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3049 struct mvpp2_tx_desc *tx_desc; 3050 int i; 3051 dma_addr_t buf_dma_addr; 3052 3053 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3054 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3055 void *addr = skb_frag_address(frag); 3056 3057 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3058 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3059 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); 3060 3061 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, 3062 skb_frag_size(frag), 3063 DMA_TO_DEVICE); 3064 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { 3065 mvpp2_txq_desc_put(txq); 3066 goto cleanup; 3067 } 3068 3069 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 3070 3071 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 3072 /* Last descriptor */ 3073 mvpp2_txdesc_cmd_set(port, tx_desc, 3074 MVPP2_TXD_L_DESC); 3075 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 3076 } 
else { 3077 /* Descriptor in the middle: Not First, Not Last */ 3078 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 3079 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 3080 } 3081 } 3082 3083 return 0; 3084 cleanup: 3085 /* Release all descriptors that were used to map fragments of 3086 * this packet, as well as the corresponding DMA mappings 3087 */ 3088 for (i = i - 1; i >= 0; i--) { 3089 tx_desc = txq->descs + i; 3090 tx_desc_unmap_put(port, txq, tx_desc); 3091 } 3092 3093 return -ENOMEM; 3094 } 3095 3096 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, 3097 struct net_device *dev, 3098 struct mvpp2_tx_queue *txq, 3099 struct mvpp2_tx_queue *aggr_txq, 3100 struct mvpp2_txq_pcpu *txq_pcpu, 3101 int hdr_sz) 3102 { 3103 struct mvpp2_port *port = netdev_priv(dev); 3104 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3105 dma_addr_t addr; 3106 3107 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3108 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); 3109 3110 addr = txq_pcpu->tso_headers_dma + 3111 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 3112 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); 3113 3114 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | 3115 MVPP2_TXD_F_DESC | 3116 MVPP2_TXD_PADDING_DISABLE); 3117 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 3118 } 3119 3120 static inline int mvpp2_tso_put_data(struct sk_buff *skb, 3121 struct net_device *dev, struct tso_t *tso, 3122 struct mvpp2_tx_queue *txq, 3123 struct mvpp2_tx_queue *aggr_txq, 3124 struct mvpp2_txq_pcpu *txq_pcpu, 3125 int sz, bool left, bool last) 3126 { 3127 struct mvpp2_port *port = netdev_priv(dev); 3128 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3129 dma_addr_t buf_dma_addr; 3130 3131 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3132 mvpp2_txdesc_size_set(port, tx_desc, sz); 3133 3134 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, 3135 DMA_TO_DEVICE); 3136 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 3137 mvpp2_txq_desc_put(txq); 3138 return -ENOMEM; 3139 } 3140 3141 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 3142 3143 if (!left) { 3144 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); 3145 if (last) { 3146 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 3147 return 0; 3148 } 3149 } else { 3150 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 3151 } 3152 3153 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 3154 return 0; 3155 } 3156 3157 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, 3158 struct mvpp2_tx_queue *txq, 3159 struct mvpp2_tx_queue *aggr_txq, 3160 struct mvpp2_txq_pcpu *txq_pcpu) 3161 { 3162 struct mvpp2_port *port = netdev_priv(dev); 3163 struct tso_t tso; 3164 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); 3165 int i, len, descs = 0; 3166 3167 /* Check number of available descriptors */ 3168 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || 3169 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 3170 tso_count_descs(skb))) 3171 return 0; 3172 3173 tso_start(skb, &tso); 3174 len = skb->len - hdr_sz; 3175 while (len > 0) { 3176 int left = min_t(int, skb_shinfo(skb)->gso_size, len); 3177 char *hdr = txq_pcpu->tso_headers + 3178 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 3179 3180 len -= left; 3181 descs++; 3182 3183 tso_build_hdr(skb, hdr, &tso, left, len == 0); 3184 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); 3185 3186 while (left > 0) { 3187 int sz = min_t(int, tso.size, left); 3188 left -= sz; 3189 descs++; 3190 3191 if 
(mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, 3192 txq_pcpu, sz, left, len == 0)) 3193 goto release; 3194 tso_build_data(skb, &tso, sz); 3195 } 3196 } 3197 3198 return descs; 3199 3200 release: 3201 for (i = descs - 1; i >= 0; i--) { 3202 struct mvpp2_tx_desc *tx_desc = txq->descs + i; 3203 tx_desc_unmap_put(port, txq, tx_desc); 3204 } 3205 return 0; 3206 } 3207 3208 /* Main tx processing */ 3209 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) 3210 { 3211 struct mvpp2_port *port = netdev_priv(dev); 3212 struct mvpp2_tx_queue *txq, *aggr_txq; 3213 struct mvpp2_txq_pcpu *txq_pcpu; 3214 struct mvpp2_tx_desc *tx_desc; 3215 dma_addr_t buf_dma_addr; 3216 unsigned long flags = 0; 3217 unsigned int thread; 3218 int frags = 0; 3219 u16 txq_id; 3220 u32 tx_cmd; 3221 3222 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3223 3224 txq_id = skb_get_queue_mapping(skb); 3225 txq = port->txqs[txq_id]; 3226 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3227 aggr_txq = &port->priv->aggr_txqs[thread]; 3228 3229 if (test_bit(thread, &port->priv->lock_map)) 3230 spin_lock_irqsave(&port->tx_lock[thread], flags); 3231 3232 if (skb_is_gso(skb)) { 3233 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); 3234 goto out; 3235 } 3236 frags = skb_shinfo(skb)->nr_frags + 1; 3237 3238 /* Check number of available descriptors */ 3239 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || 3240 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { 3241 frags = 0; 3242 goto out; 3243 } 3244 3245 /* Get a descriptor for the first part of the packet */ 3246 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3247 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3248 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); 3249 3250 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, 3251 skb_headlen(skb), DMA_TO_DEVICE); 3252 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 3253 mvpp2_txq_desc_put(txq); 3254 frags = 0; 3255 goto out; 3256 } 3257 3258 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 3259 3260 tx_cmd = mvpp2_skb_tx_csum(port, skb); 3261 3262 if (frags == 1) { 3263 /* First and Last descriptor */ 3264 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 3265 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 3266 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); 3267 } else { 3268 /* First but not Last */ 3269 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 3270 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 3271 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); 3272 3273 /* Continue with other skb fragments */ 3274 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 3275 tx_desc_unmap_put(port, txq, tx_desc); 3276 frags = 0; 3277 } 3278 } 3279 3280 out: 3281 if (frags > 0) { 3282 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); 3283 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 3284 3285 txq_pcpu->reserved_num -= frags; 3286 txq_pcpu->count += frags; 3287 aggr_txq->count += frags; 3288 3289 /* Enable transmit */ 3290 wmb(); 3291 mvpp2_aggr_txq_pend_desc_add(port, frags); 3292 3293 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 3294 netif_tx_stop_queue(nq); 3295 3296 u64_stats_update_begin(&stats->syncp); 3297 stats->tx_packets++; 3298 stats->tx_bytes += skb->len; 3299 u64_stats_update_end(&stats->syncp); 3300 } else { 3301 dev->stats.tx_dropped++; 3302 dev_kfree_skb_any(skb); 3303 } 3304 3305 /* Finalize TX processing */ 3306 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 3307 
mvpp2_txq_done(port, txq, txq_pcpu); 3308 3309 /* Set the timer in case not all frags were processed */ 3310 if (!port->has_tx_irqs && txq_pcpu->count <= frags && 3311 txq_pcpu->count > 0) { 3312 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); 3313 3314 if (!port_pcpu->timer_scheduled) { 3315 port_pcpu->timer_scheduled = true; 3316 hrtimer_start(&port_pcpu->tx_done_timer, 3317 MVPP2_TXDONE_HRTIMER_PERIOD_NS, 3318 HRTIMER_MODE_REL_PINNED_SOFT); 3319 } 3320 } 3321 3322 if (test_bit(thread, &port->priv->lock_map)) 3323 spin_unlock_irqrestore(&port->tx_lock[thread], flags); 3324 3325 return NETDEV_TX_OK; 3326 } 3327 3328 static inline void mvpp2_cause_error(struct net_device *dev, int cause) 3329 { 3330 if (cause & MVPP2_CAUSE_FCS_ERR_MASK) 3331 netdev_err(dev, "FCS error\n"); 3332 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) 3333 netdev_err(dev, "rx fifo overrun error\n"); 3334 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) 3335 netdev_err(dev, "tx fifo underrun error\n"); 3336 } 3337 3338 static int mvpp2_poll(struct napi_struct *napi, int budget) 3339 { 3340 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; 3341 int rx_done = 0; 3342 struct mvpp2_port *port = netdev_priv(napi->dev); 3343 struct mvpp2_queue_vector *qv; 3344 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3345 3346 qv = container_of(napi, struct mvpp2_queue_vector, napi); 3347 3348 /* Rx/Tx cause register 3349 * 3350 * Bits 0-15: each bit indicates received packets on the Rx queue 3351 * (bit 0 is for Rx queue 0). 3352 * 3353 * Bits 16-23: each bit indicates transmitted packets on the Tx queue 3354 * (bit 16 is for Tx queue 0). 3355 * 3356 * Each CPU has its own Rx/Tx cause register 3357 */ 3358 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, 3359 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 3360 3361 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 3362 if (cause_misc) { 3363 mvpp2_cause_error(port->dev, cause_misc); 3364 3365 /* Clear the cause register */ 3366 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 3367 mvpp2_thread_write(port->priv, thread, 3368 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 3369 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 3370 } 3371 3372 if (port->has_tx_irqs) { 3373 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 3374 if (cause_tx) { 3375 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 3376 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 3377 } 3378 } 3379 3380 /* Process RX packets */ 3381 cause_rx = cause_rx_tx & 3382 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); 3383 cause_rx <<= qv->first_rxq; 3384 cause_rx |= qv->pending_cause_rx; 3385 while (cause_rx && budget > 0) { 3386 int count; 3387 struct mvpp2_rx_queue *rxq; 3388 3389 rxq = mvpp2_get_rx_queue(port, cause_rx); 3390 if (!rxq) 3391 break; 3392 3393 count = mvpp2_rx(port, napi, budget, rxq); 3394 rx_done += count; 3395 budget -= count; 3396 if (budget > 0) { 3397 /* Clear the bit associated to this Rx queue 3398 * so that next iteration will continue from 3399 * the next Rx queue. 
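 * mvpp2_get_rx_queue() always picks the highest set bit of the cause
 * bitmap (fls() - 1), so clearing that bit here makes the loop walk the
 * pending queues from the highest towards the lowest.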
3400 */ 3401 cause_rx &= ~(1 << rxq->logic_rxq); 3402 } 3403 } 3404 3405 if (budget > 0) { 3406 cause_rx = 0; 3407 napi_complete_done(napi, rx_done); 3408 3409 mvpp2_qvec_interrupt_enable(qv); 3410 } 3411 qv->pending_cause_rx = cause_rx; 3412 return rx_done; 3413 } 3414 3415 static void mvpp22_mode_reconfigure(struct mvpp2_port *port) 3416 { 3417 u32 ctrl3; 3418 3419 /* Set the GMAC & XLG MAC in reset */ 3420 mvpp2_mac_reset_assert(port); 3421 3422 /* Set the MPCS and XPCS in reset */ 3423 mvpp22_pcs_reset_assert(port); 3424 3425 /* comphy reconfiguration */ 3426 mvpp22_comphy_init(port); 3427 3428 /* gop reconfiguration */ 3429 mvpp22_gop_init(port); 3430 3431 mvpp22_pcs_reset_deassert(port); 3432 3433 /* Only GOP port 0 has an XLG MAC */ 3434 if (port->gop_id == 0) { 3435 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); 3436 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3437 3438 if (mvpp2_is_xlg(port->phy_interface)) 3439 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; 3440 else 3441 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 3442 3443 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); 3444 } 3445 3446 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) 3447 mvpp2_xlg_max_rx_size_set(port); 3448 else 3449 mvpp2_gmac_max_rx_size_set(port); 3450 } 3451 3452 /* Set hw internals when starting port */ 3453 static void mvpp2_start_dev(struct mvpp2_port *port) 3454 { 3455 int i; 3456 3457 mvpp2_txp_max_tx_size_set(port); 3458 3459 for (i = 0; i < port->nqvecs; i++) 3460 napi_enable(&port->qvecs[i].napi); 3461 3462 /* Enable interrupts on all threads */ 3463 mvpp2_interrupts_enable(port); 3464 3465 if (port->priv->hw_version == MVPP22) 3466 mvpp22_mode_reconfigure(port); 3467 3468 if (port->phylink) { 3469 phylink_start(port->phylink); 3470 } else { 3471 /* Phylink isn't used as of now for ACPI, so the MAC has to be 3472 * configured manually when the interface is started. This will 3473 * be removed as soon as the phylink ACPI support lands in. 3474 */ 3475 struct phylink_link_state state = { 3476 .interface = port->phy_interface, 3477 }; 3478 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); 3479 mvpp2_mac_link_up(&port->phylink_config, NULL, 3480 MLO_AN_INBAND, port->phy_interface, 3481 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); 3482 } 3483 3484 netif_tx_start_all_queues(port->dev); 3485 } 3486 3487 /* Set hw internals when stopping port */ 3488 static void mvpp2_stop_dev(struct mvpp2_port *port) 3489 { 3490 int i; 3491 3492 /* Disable interrupts on all threads */ 3493 mvpp2_interrupts_disable(port); 3494 3495 for (i = 0; i < port->nqvecs; i++) 3496 napi_disable(&port->qvecs[i].napi); 3497 3498 if (port->phylink) 3499 phylink_stop(port->phylink); 3500 phy_power_off(port->comphy); 3501 } 3502 3503 static int mvpp2_check_ringparam_valid(struct net_device *dev, 3504 struct ethtool_ringparam *ring) 3505 { 3506 u16 new_rx_pending = ring->rx_pending; 3507 u16 new_tx_pending = ring->tx_pending; 3508 3509 if (ring->rx_pending == 0 || ring->tx_pending == 0) 3510 return -EINVAL; 3511 3512 if (ring->rx_pending > MVPP2_MAX_RXD_MAX) 3513 new_rx_pending = MVPP2_MAX_RXD_MAX; 3514 else if (!IS_ALIGNED(ring->rx_pending, 16)) 3515 new_rx_pending = ALIGN(ring->rx_pending, 16); 3516 3517 if (ring->tx_pending > MVPP2_MAX_TXD_MAX) 3518 new_tx_pending = MVPP2_MAX_TXD_MAX; 3519 else if (!IS_ALIGNED(ring->tx_pending, 32)) 3520 new_tx_pending = ALIGN(ring->tx_pending, 32); 3521 3522 /* The Tx ring size cannot be smaller than the minimum number of 3523 * descriptors needed for TSO. 
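 * As an example of the rounding above, a requested rx_pending of 100 is
 * aligned up to 112 (multiple of 16) and a tx_pending of 100 to 128
 * (multiple of 32) before this TSO floor is applied.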
3524 */ 3525 if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 3526 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 3527 3528 if (ring->rx_pending != new_rx_pending) { 3529 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", 3530 ring->rx_pending, new_rx_pending); 3531 ring->rx_pending = new_rx_pending; 3532 } 3533 3534 if (ring->tx_pending != new_tx_pending) { 3535 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", 3536 ring->tx_pending, new_tx_pending); 3537 ring->tx_pending = new_tx_pending; 3538 } 3539 3540 return 0; 3541 } 3542 3543 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 3544 { 3545 u32 mac_addr_l, mac_addr_m, mac_addr_h; 3546 3547 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 3548 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); 3549 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); 3550 addr[0] = (mac_addr_h >> 24) & 0xFF; 3551 addr[1] = (mac_addr_h >> 16) & 0xFF; 3552 addr[2] = (mac_addr_h >> 8) & 0xFF; 3553 addr[3] = mac_addr_h & 0xFF; 3554 addr[4] = mac_addr_m & 0xFF; 3555 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; 3556 } 3557 3558 static int mvpp2_irqs_init(struct mvpp2_port *port) 3559 { 3560 int err, i; 3561 3562 for (i = 0; i < port->nqvecs; i++) { 3563 struct mvpp2_queue_vector *qv = port->qvecs + i; 3564 3565 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 3566 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); 3567 if (!qv->mask) { 3568 err = -ENOMEM; 3569 goto err; 3570 } 3571 3572 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 3573 } 3574 3575 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 3576 if (err) 3577 goto err; 3578 3579 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 3580 unsigned int cpu; 3581 3582 for_each_present_cpu(cpu) { 3583 if (mvpp2_cpu_to_thread(port->priv, cpu) == 3584 qv->sw_thread_id) 3585 cpumask_set_cpu(cpu, qv->mask); 3586 } 3587 3588 irq_set_affinity_hint(qv->irq, qv->mask); 3589 } 3590 } 3591 3592 return 0; 3593 err: 3594 for (i = 0; i < port->nqvecs; i++) { 3595 struct mvpp2_queue_vector *qv = port->qvecs + i; 3596 3597 irq_set_affinity_hint(qv->irq, NULL); 3598 kfree(qv->mask); 3599 qv->mask = NULL; 3600 free_irq(qv->irq, qv); 3601 } 3602 3603 return err; 3604 } 3605 3606 static void mvpp2_irqs_deinit(struct mvpp2_port *port) 3607 { 3608 int i; 3609 3610 for (i = 0; i < port->nqvecs; i++) { 3611 struct mvpp2_queue_vector *qv = port->qvecs + i; 3612 3613 irq_set_affinity_hint(qv->irq, NULL); 3614 kfree(qv->mask); 3615 qv->mask = NULL; 3616 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); 3617 free_irq(qv->irq, qv); 3618 } 3619 } 3620 3621 static bool mvpp22_rss_is_supported(void) 3622 { 3623 return queue_mode == MVPP2_QDIST_MULTI_MODE; 3624 } 3625 3626 static int mvpp2_open(struct net_device *dev) 3627 { 3628 struct mvpp2_port *port = netdev_priv(dev); 3629 struct mvpp2 *priv = port->priv; 3630 unsigned char mac_bcast[ETH_ALEN] = { 3631 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 3632 bool valid = false; 3633 int err; 3634 3635 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); 3636 if (err) { 3637 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 3638 return err; 3639 } 3640 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); 3641 if (err) { 3642 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); 3643 return err; 3644 } 3645 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); 3646 if (err) { 3647 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); 3648 return err; 3649 } 3650 
err = mvpp2_prs_def_flow(port); 3651 if (err) { 3652 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 3653 return err; 3654 } 3655 3656 /* Allocate the Rx/Tx queues */ 3657 err = mvpp2_setup_rxqs(port); 3658 if (err) { 3659 netdev_err(port->dev, "cannot allocate Rx queues\n"); 3660 return err; 3661 } 3662 3663 err = mvpp2_setup_txqs(port); 3664 if (err) { 3665 netdev_err(port->dev, "cannot allocate Tx queues\n"); 3666 goto err_cleanup_rxqs; 3667 } 3668 3669 err = mvpp2_irqs_init(port); 3670 if (err) { 3671 netdev_err(port->dev, "cannot init IRQs\n"); 3672 goto err_cleanup_txqs; 3673 } 3674 3675 /* Phylink isn't supported yet in ACPI mode */ 3676 if (port->of_node) { 3677 err = phylink_of_phy_connect(port->phylink, port->of_node, 0); 3678 if (err) { 3679 netdev_err(port->dev, "could not attach PHY (%d)\n", 3680 err); 3681 goto err_free_irq; 3682 } 3683 3684 valid = true; 3685 } 3686 3687 if (priv->hw_version == MVPP22 && port->link_irq) { 3688 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, 3689 dev->name, port); 3690 if (err) { 3691 netdev_err(port->dev, "cannot request link IRQ %d\n", 3692 port->link_irq); 3693 goto err_free_irq; 3694 } 3695 3696 mvpp22_gop_setup_irq(port); 3697 3698 /* In default link is down */ 3699 netif_carrier_off(port->dev); 3700 3701 valid = true; 3702 } else { 3703 port->link_irq = 0; 3704 } 3705 3706 if (!valid) { 3707 netdev_err(port->dev, 3708 "invalid configuration: no dt or link IRQ"); 3709 goto err_free_irq; 3710 } 3711 3712 /* Unmask interrupts on all CPUs */ 3713 on_each_cpu(mvpp2_interrupts_unmask, port, 1); 3714 mvpp2_shared_interrupt_mask_unmask(port, false); 3715 3716 mvpp2_start_dev(port); 3717 3718 /* Start hardware statistics gathering */ 3719 queue_delayed_work(priv->stats_queue, &port->stats_work, 3720 MVPP2_MIB_COUNTERS_STATS_DELAY); 3721 3722 return 0; 3723 3724 err_free_irq: 3725 mvpp2_irqs_deinit(port); 3726 err_cleanup_txqs: 3727 mvpp2_cleanup_txqs(port); 3728 err_cleanup_rxqs: 3729 mvpp2_cleanup_rxqs(port); 3730 return err; 3731 } 3732 3733 static int mvpp2_stop(struct net_device *dev) 3734 { 3735 struct mvpp2_port *port = netdev_priv(dev); 3736 struct mvpp2_port_pcpu *port_pcpu; 3737 unsigned int thread; 3738 3739 mvpp2_stop_dev(port); 3740 3741 /* Mask interrupts on all threads */ 3742 on_each_cpu(mvpp2_interrupts_mask, port, 1); 3743 mvpp2_shared_interrupt_mask_unmask(port, true); 3744 3745 if (port->phylink) 3746 phylink_disconnect_phy(port->phylink); 3747 if (port->link_irq) 3748 free_irq(port->link_irq, port); 3749 3750 mvpp2_irqs_deinit(port); 3751 if (!port->has_tx_irqs) { 3752 for (thread = 0; thread < port->priv->nthreads; thread++) { 3753 port_pcpu = per_cpu_ptr(port->pcpu, thread); 3754 3755 hrtimer_cancel(&port_pcpu->tx_done_timer); 3756 port_pcpu->timer_scheduled = false; 3757 } 3758 } 3759 mvpp2_cleanup_rxqs(port); 3760 mvpp2_cleanup_txqs(port); 3761 3762 cancel_delayed_work_sync(&port->stats_work); 3763 3764 mvpp2_mac_reset_assert(port); 3765 mvpp22_pcs_reset_assert(port); 3766 3767 return 0; 3768 } 3769 3770 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, 3771 struct netdev_hw_addr_list *list) 3772 { 3773 struct netdev_hw_addr *ha; 3774 int ret; 3775 3776 netdev_hw_addr_list_for_each(ha, list) { 3777 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); 3778 if (ret) 3779 return ret; 3780 } 3781 3782 return 0; 3783 } 3784 3785 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) 3786 { 3787 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 3788 
mvpp2_prs_vid_enable_filtering(port); 3789 else 3790 mvpp2_prs_vid_disable_filtering(port); 3791 3792 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3793 MVPP2_PRS_L2_UNI_CAST, enable); 3794 3795 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3796 MVPP2_PRS_L2_MULTI_CAST, enable); 3797 } 3798 3799 static void mvpp2_set_rx_mode(struct net_device *dev) 3800 { 3801 struct mvpp2_port *port = netdev_priv(dev); 3802 3803 /* Clear the whole UC and MC list */ 3804 mvpp2_prs_mac_del_all(port); 3805 3806 if (dev->flags & IFF_PROMISC) { 3807 mvpp2_set_rx_promisc(port, true); 3808 return; 3809 } 3810 3811 mvpp2_set_rx_promisc(port, false); 3812 3813 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || 3814 mvpp2_prs_mac_da_accept_list(port, &dev->uc)) 3815 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3816 MVPP2_PRS_L2_UNI_CAST, true); 3817 3818 if (dev->flags & IFF_ALLMULTI) { 3819 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3820 MVPP2_PRS_L2_MULTI_CAST, true); 3821 return; 3822 } 3823 3824 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || 3825 mvpp2_prs_mac_da_accept_list(port, &dev->mc)) 3826 mvpp2_prs_mac_promisc_set(port->priv, port->id, 3827 MVPP2_PRS_L2_MULTI_CAST, true); 3828 } 3829 3830 static int mvpp2_set_mac_address(struct net_device *dev, void *p) 3831 { 3832 const struct sockaddr *addr = p; 3833 int err; 3834 3835 if (!is_valid_ether_addr(addr->sa_data)) 3836 return -EADDRNOTAVAIL; 3837 3838 err = mvpp2_prs_update_mac_da(dev, addr->sa_data); 3839 if (err) { 3840 /* Reconfigure the parser to accept the original MAC address */ 3841 mvpp2_prs_update_mac_da(dev, dev->dev_addr); 3842 netdev_err(dev, "failed to change MAC address\n"); 3843 } 3844 return err; 3845 } 3846 3847 /* Shut down all the ports, reconfigure the pools as percpu or shared, 3848 * then bring all the ports up again.
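 * Note: every running port is stopped before the BM pools are destroyed and re-created, so no Rx/Tx path can reference a buffer while the pools are being switched.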
3849 */ 3850 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) 3851 { 3852 int numbufs = MVPP2_BM_POOLS_NUM, i; 3853 struct mvpp2_port *port = NULL; 3854 bool status[MVPP2_MAX_PORTS]; 3855 3856 for (i = 0; i < priv->port_count; i++) { 3857 port = priv->port_list[i]; 3858 status[i] = netif_running(port->dev); 3859 if (status[i]) 3860 mvpp2_stop(port->dev); 3861 } 3862 3863 /* nrxqs is the same for all ports */ 3864 if (priv->percpu_pools) 3865 numbufs = port->nrxqs * 2; 3866 3867 for (i = 0; i < numbufs; i++) 3868 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); 3869 3870 devm_kfree(port->dev->dev.parent, priv->bm_pools); 3871 priv->percpu_pools = percpu; 3872 mvpp2_bm_init(port->dev->dev.parent, priv); 3873 3874 for (i = 0; i < priv->port_count; i++) { 3875 port = priv->port_list[i]; 3876 mvpp2_swf_bm_pool_init(port); 3877 if (status[i]) 3878 mvpp2_open(port->dev); 3879 } 3880 3881 return 0; 3882 } 3883 3884 static int mvpp2_change_mtu(struct net_device *dev, int mtu) 3885 { 3886 struct mvpp2_port *port = netdev_priv(dev); 3887 bool running = netif_running(dev); 3888 struct mvpp2 *priv = port->priv; 3889 int err; 3890 3891 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { 3892 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, 3893 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); 3894 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); 3895 } 3896 3897 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { 3898 if (priv->percpu_pools) { 3899 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); 3900 mvpp2_bm_switch_buffers(priv, false); 3901 } 3902 } else { 3903 bool jumbo = false; 3904 int i; 3905 3906 for (i = 0; i < priv->port_count; i++) 3907 if (priv->port_list[i] != port && 3908 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > 3909 MVPP2_BM_LONG_PKT_SIZE) { 3910 jumbo = true; 3911 break; 3912 } 3913 3914 /* No port is using jumbo frames */ 3915 if (!jumbo) { 3916 dev_info(port->dev->dev.parent, 3917 "all ports have a low MTU, switching to per-cpu buffers"); 3918 mvpp2_bm_switch_buffers(priv, true); 3919 } 3920 } 3921 3922 if (running) 3923 mvpp2_stop_dev(port); 3924 3925 err = mvpp2_bm_update_mtu(dev, mtu); 3926 if (err) { 3927 netdev_err(dev, "failed to change MTU\n"); 3928 /* Reconfigure BM to the original MTU */ 3929 mvpp2_bm_update_mtu(dev, dev->mtu); 3930 } else { 3931 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 3932 } 3933 3934 if (running) { 3935 mvpp2_start_dev(port); 3936 mvpp2_egress_enable(port); 3937 mvpp2_ingress_enable(port); 3938 } 3939 3940 return err; 3941 } 3942 3943 static void 3944 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 3945 { 3946 struct mvpp2_port *port = netdev_priv(dev); 3947 unsigned int start; 3948 unsigned int cpu; 3949 3950 for_each_possible_cpu(cpu) { 3951 struct mvpp2_pcpu_stats *cpu_stats; 3952 u64 rx_packets; 3953 u64 rx_bytes; 3954 u64 tx_packets; 3955 u64 tx_bytes; 3956 3957 cpu_stats = per_cpu_ptr(port->stats, cpu); 3958 do { 3959 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 3960 rx_packets = cpu_stats->rx_packets; 3961 rx_bytes = cpu_stats->rx_bytes; 3962 tx_packets = cpu_stats->tx_packets; 3963 tx_bytes = cpu_stats->tx_bytes; 3964 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 3965 3966 stats->rx_packets += rx_packets; 3967 stats->rx_bytes += rx_bytes; 3968 stats->tx_packets += tx_packets; 3969 stats->tx_bytes += tx_bytes; 3970 } 3971 3972 stats->rx_errors = dev->stats.rx_errors; 3973 stats->rx_dropped = dev->stats.rx_dropped; 3974 stats->tx_dropped = 
dev->stats.tx_dropped; 3975 } 3976 3977 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 3978 { 3979 struct mvpp2_port *port = netdev_priv(dev); 3980 3981 if (!port->phylink) 3982 return -ENOTSUPP; 3983 3984 return phylink_mii_ioctl(port->phylink, ifr, cmd); 3985 } 3986 3987 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 3988 { 3989 struct mvpp2_port *port = netdev_priv(dev); 3990 int ret; 3991 3992 ret = mvpp2_prs_vid_entry_add(port, vid); 3993 if (ret) 3994 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", 3995 MVPP2_PRS_VLAN_FILT_MAX - 1); 3996 return ret; 3997 } 3998 3999 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 4000 { 4001 struct mvpp2_port *port = netdev_priv(dev); 4002 4003 mvpp2_prs_vid_entry_remove(port, vid); 4004 return 0; 4005 } 4006 4007 static int mvpp2_set_features(struct net_device *dev, 4008 netdev_features_t features) 4009 { 4010 netdev_features_t changed = dev->features ^ features; 4011 struct mvpp2_port *port = netdev_priv(dev); 4012 4013 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 4014 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 4015 mvpp2_prs_vid_enable_filtering(port); 4016 } else { 4017 /* Invalidate all registered VID filters for this 4018 * port 4019 */ 4020 mvpp2_prs_vid_remove_all(port); 4021 4022 mvpp2_prs_vid_disable_filtering(port); 4023 } 4024 } 4025 4026 if (changed & NETIF_F_RXHASH) { 4027 if (features & NETIF_F_RXHASH) 4028 mvpp22_port_rss_enable(port); 4029 else 4030 mvpp22_port_rss_disable(port); 4031 } 4032 4033 return 0; 4034 } 4035 4036 /* Ethtool methods */ 4037 4038 static int mvpp2_ethtool_nway_reset(struct net_device *dev) 4039 { 4040 struct mvpp2_port *port = netdev_priv(dev); 4041 4042 if (!port->phylink) 4043 return -ENOTSUPP; 4044 4045 return phylink_ethtool_nway_reset(port->phylink); 4046 } 4047 4048 /* Set interrupt coalescing via ethtool */ 4049 static int mvpp2_ethtool_set_coalesce(struct net_device *dev, 4050 struct ethtool_coalesce *c) 4051 { 4052 struct mvpp2_port *port = netdev_priv(dev); 4053 int queue; 4054 4055 for (queue = 0; queue < port->nrxqs; queue++) { 4056 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4057 4058 rxq->time_coal = c->rx_coalesce_usecs; 4059 rxq->pkts_coal = c->rx_max_coalesced_frames; 4060 mvpp2_rx_pkts_coal_set(port, rxq); 4061 mvpp2_rx_time_coal_set(port, rxq); 4062 } 4063 4064 if (port->has_tx_irqs) { 4065 port->tx_time_coal = c->tx_coalesce_usecs; 4066 mvpp2_tx_time_coal_set(port); 4067 } 4068 4069 for (queue = 0; queue < port->ntxqs; queue++) { 4070 struct mvpp2_tx_queue *txq = port->txqs[queue]; 4071 4072 txq->done_pkts_coal = c->tx_max_coalesced_frames; 4073 4074 if (port->has_tx_irqs) 4075 mvpp2_tx_pkts_coal_set(port, txq); 4076 } 4077 4078 return 0; 4079 } 4080 4081 /* Get interrupt coalescing via ethtool */ 4082 static int mvpp2_ethtool_get_coalesce(struct net_device *dev, 4083 struct ethtool_coalesce *c) 4084 { 4085 struct mvpp2_port *port = netdev_priv(dev); 4086 4087 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; 4088 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; 4089 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; 4090 c->tx_coalesce_usecs = port->tx_time_coal; 4091 return 0; 4092 } 4093 4094 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, 4095 struct ethtool_drvinfo *drvinfo) 4096 { 4097 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, 4098 sizeof(drvinfo->driver)); 4099 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, 4100
sizeof(drvinfo->version)); 4101 strlcpy(drvinfo->bus_info, dev_name(&dev->dev), 4102 sizeof(drvinfo->bus_info)); 4103 } 4104 4105 static void mvpp2_ethtool_get_ringparam(struct net_device *dev, 4106 struct ethtool_ringparam *ring) 4107 { 4108 struct mvpp2_port *port = netdev_priv(dev); 4109 4110 ring->rx_max_pending = MVPP2_MAX_RXD_MAX; 4111 ring->tx_max_pending = MVPP2_MAX_TXD_MAX; 4112 ring->rx_pending = port->rx_ring_size; 4113 ring->tx_pending = port->tx_ring_size; 4114 } 4115 4116 static int mvpp2_ethtool_set_ringparam(struct net_device *dev, 4117 struct ethtool_ringparam *ring) 4118 { 4119 struct mvpp2_port *port = netdev_priv(dev); 4120 u16 prev_rx_ring_size = port->rx_ring_size; 4121 u16 prev_tx_ring_size = port->tx_ring_size; 4122 int err; 4123 4124 err = mvpp2_check_ringparam_valid(dev, ring); 4125 if (err) 4126 return err; 4127 4128 if (!netif_running(dev)) { 4129 port->rx_ring_size = ring->rx_pending; 4130 port->tx_ring_size = ring->tx_pending; 4131 return 0; 4132 } 4133 4134 /* The interface is running, so we have to force a 4135 * reallocation of the queues 4136 */ 4137 mvpp2_stop_dev(port); 4138 mvpp2_cleanup_rxqs(port); 4139 mvpp2_cleanup_txqs(port); 4140 4141 port->rx_ring_size = ring->rx_pending; 4142 port->tx_ring_size = ring->tx_pending; 4143 4144 err = mvpp2_setup_rxqs(port); 4145 if (err) { 4146 /* Reallocate Rx queues with the original ring size */ 4147 port->rx_ring_size = prev_rx_ring_size; 4148 ring->rx_pending = prev_rx_ring_size; 4149 err = mvpp2_setup_rxqs(port); 4150 if (err) 4151 goto err_out; 4152 } 4153 err = mvpp2_setup_txqs(port); 4154 if (err) { 4155 /* Reallocate Tx queues with the original ring size */ 4156 port->tx_ring_size = prev_tx_ring_size; 4157 ring->tx_pending = prev_tx_ring_size; 4158 err = mvpp2_setup_txqs(port); 4159 if (err) 4160 goto err_clean_rxqs; 4161 } 4162 4163 mvpp2_start_dev(port); 4164 mvpp2_egress_enable(port); 4165 mvpp2_ingress_enable(port); 4166 4167 return 0; 4168 4169 err_clean_rxqs: 4170 mvpp2_cleanup_rxqs(port); 4171 err_out: 4172 netdev_err(dev, "failed to change ring parameters"); 4173 return err; 4174 } 4175 4176 static void mvpp2_ethtool_get_pause_param(struct net_device *dev, 4177 struct ethtool_pauseparam *pause) 4178 { 4179 struct mvpp2_port *port = netdev_priv(dev); 4180 4181 if (!port->phylink) 4182 return; 4183 4184 phylink_ethtool_get_pauseparam(port->phylink, pause); 4185 } 4186 4187 static int mvpp2_ethtool_set_pause_param(struct net_device *dev, 4188 struct ethtool_pauseparam *pause) 4189 { 4190 struct mvpp2_port *port = netdev_priv(dev); 4191 4192 if (!port->phylink) 4193 return -ENOTSUPP; 4194 4195 return phylink_ethtool_set_pauseparam(port->phylink, pause); 4196 } 4197 4198 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, 4199 struct ethtool_link_ksettings *cmd) 4200 { 4201 struct mvpp2_port *port = netdev_priv(dev); 4202 4203 if (!port->phylink) 4204 return -ENOTSUPP; 4205 4206 return phylink_ethtool_ksettings_get(port->phylink, cmd); 4207 } 4208 4209 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, 4210 const struct ethtool_link_ksettings *cmd) 4211 { 4212 struct mvpp2_port *port = netdev_priv(dev); 4213 4214 if (!port->phylink) 4215 return -ENOTSUPP; 4216 4217 return phylink_ethtool_ksettings_set(port->phylink, cmd); 4218 } 4219 4220 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, 4221 struct ethtool_rxnfc *info, u32 *rules) 4222 { 4223 struct mvpp2_port *port = netdev_priv(dev); 4224 int ret = 0, i, loc = 0; 4225 4226 if (!mvpp22_rss_is_supported()) 4227 
return -EOPNOTSUPP; 4228 4229 switch (info->cmd) { 4230 case ETHTOOL_GRXFH: 4231 ret = mvpp2_ethtool_rxfh_get(port, info); 4232 break; 4233 case ETHTOOL_GRXRINGS: 4234 info->data = port->nrxqs; 4235 break; 4236 case ETHTOOL_GRXCLSRLCNT: 4237 info->rule_cnt = port->n_rfs_rules; 4238 break; 4239 case ETHTOOL_GRXCLSRULE: 4240 ret = mvpp2_ethtool_cls_rule_get(port, info); 4241 break; 4242 case ETHTOOL_GRXCLSRLALL: 4243 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { 4244 if (port->rfs_rules[i]) 4245 rules[loc++] = i; 4246 } 4247 break; 4248 default: 4249 return -ENOTSUPP; 4250 } 4251 4252 return ret; 4253 } 4254 4255 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, 4256 struct ethtool_rxnfc *info) 4257 { 4258 struct mvpp2_port *port = netdev_priv(dev); 4259 int ret = 0; 4260 4261 if (!mvpp22_rss_is_supported()) 4262 return -EOPNOTSUPP; 4263 4264 switch (info->cmd) { 4265 case ETHTOOL_SRXFH: 4266 ret = mvpp2_ethtool_rxfh_set(port, info); 4267 break; 4268 case ETHTOOL_SRXCLSRLINS: 4269 ret = mvpp2_ethtool_cls_rule_ins(port, info); 4270 break; 4271 case ETHTOOL_SRXCLSRLDEL: 4272 ret = mvpp2_ethtool_cls_rule_del(port, info); 4273 break; 4274 default: 4275 return -EOPNOTSUPP; 4276 } 4277 return ret; 4278 } 4279 4280 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) 4281 { 4282 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0; 4283 } 4284 4285 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 4286 u8 *hfunc) 4287 { 4288 struct mvpp2_port *port = netdev_priv(dev); 4289 int ret = 0; 4290 4291 if (!mvpp22_rss_is_supported()) 4292 return -EOPNOTSUPP; 4293 4294 if (indir) 4295 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); 4296 4297 if (hfunc) 4298 *hfunc = ETH_RSS_HASH_CRC32; 4299 4300 return ret; 4301 } 4302 4303 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 4304 const u8 *key, const u8 hfunc) 4305 { 4306 struct mvpp2_port *port = netdev_priv(dev); 4307 int ret = 0; 4308 4309 if (!mvpp22_rss_is_supported()) 4310 return -EOPNOTSUPP; 4311 4312 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) 4313 return -EOPNOTSUPP; 4314 4315 if (key) 4316 return -EOPNOTSUPP; 4317 4318 if (indir) 4319 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); 4320 4321 return ret; 4322 } 4323 4324 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, 4325 u8 *key, u8 *hfunc, u32 rss_context) 4326 { 4327 struct mvpp2_port *port = netdev_priv(dev); 4328 int ret = 0; 4329 4330 if (!mvpp22_rss_is_supported()) 4331 return -EOPNOTSUPP; 4332 if (rss_context >= MVPP22_N_RSS_TABLES) 4333 return -EINVAL; 4334 4335 if (hfunc) 4336 *hfunc = ETH_RSS_HASH_CRC32; 4337 4338 if (indir) 4339 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); 4340 4341 return ret; 4342 } 4343 4344 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, 4345 const u32 *indir, const u8 *key, 4346 const u8 hfunc, u32 *rss_context, 4347 bool delete) 4348 { 4349 struct mvpp2_port *port = netdev_priv(dev); 4350 int ret; 4351 4352 if (!mvpp22_rss_is_supported()) 4353 return -EOPNOTSUPP; 4354 4355 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) 4356 return -EOPNOTSUPP; 4357 4358 if (key) 4359 return -EOPNOTSUPP; 4360 4361 if (delete) 4362 return mvpp22_port_rss_ctx_delete(port, *rss_context); 4363 4364 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { 4365 ret = mvpp22_port_rss_ctx_create(port, rss_context); 4366 if (ret) 4367 return ret; 4368 } 4369 4370 return 
mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); 4371 } 4372 /* Device ops */ 4373 4374 static const struct net_device_ops mvpp2_netdev_ops = { 4375 .ndo_open = mvpp2_open, 4376 .ndo_stop = mvpp2_stop, 4377 .ndo_start_xmit = mvpp2_tx, 4378 .ndo_set_rx_mode = mvpp2_set_rx_mode, 4379 .ndo_set_mac_address = mvpp2_set_mac_address, 4380 .ndo_change_mtu = mvpp2_change_mtu, 4381 .ndo_get_stats64 = mvpp2_get_stats64, 4382 .ndo_do_ioctl = mvpp2_ioctl, 4383 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, 4384 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, 4385 .ndo_set_features = mvpp2_set_features, 4386 }; 4387 4388 static const struct ethtool_ops mvpp2_eth_tool_ops = { 4389 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 4390 ETHTOOL_COALESCE_MAX_FRAMES, 4391 .nway_reset = mvpp2_ethtool_nway_reset, 4392 .get_link = ethtool_op_get_link, 4393 .set_coalesce = mvpp2_ethtool_set_coalesce, 4394 .get_coalesce = mvpp2_ethtool_get_coalesce, 4395 .get_drvinfo = mvpp2_ethtool_get_drvinfo, 4396 .get_ringparam = mvpp2_ethtool_get_ringparam, 4397 .set_ringparam = mvpp2_ethtool_set_ringparam, 4398 .get_strings = mvpp2_ethtool_get_strings, 4399 .get_ethtool_stats = mvpp2_ethtool_get_stats, 4400 .get_sset_count = mvpp2_ethtool_get_sset_count, 4401 .get_pauseparam = mvpp2_ethtool_get_pause_param, 4402 .set_pauseparam = mvpp2_ethtool_set_pause_param, 4403 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, 4404 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, 4405 .get_rxnfc = mvpp2_ethtool_get_rxnfc, 4406 .set_rxnfc = mvpp2_ethtool_set_rxnfc, 4407 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, 4408 .get_rxfh = mvpp2_ethtool_get_rxfh, 4409 .set_rxfh = mvpp2_ethtool_set_rxfh, 4410 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, 4411 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, 4412 }; 4413 4414 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that 4415 * had a single IRQ defined per-port. 
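 * In that case a single shared queue vector covering all of the port's Rx queues is created and its IRQ is taken from index 0 of the port node; newer DTs instead provide one "hifX" interrupt per software thread (see mvpp2_multi_queue_vectors_init() below).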
4416 */ 4417 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, 4418 struct device_node *port_node) 4419 { 4420 struct mvpp2_queue_vector *v = &port->qvecs[0]; 4421 4422 v->first_rxq = 0; 4423 v->nrxqs = port->nrxqs; 4424 v->type = MVPP2_QUEUE_VECTOR_SHARED; 4425 v->sw_thread_id = 0; 4426 v->sw_thread_mask = *cpumask_bits(cpu_online_mask); 4427 v->port = port; 4428 v->irq = irq_of_parse_and_map(port_node, 0); 4429 if (v->irq <= 0) 4430 return -EINVAL; 4431 netif_napi_add(port->dev, &v->napi, mvpp2_poll, 4432 NAPI_POLL_WEIGHT); 4433 4434 port->nqvecs = 1; 4435 4436 return 0; 4437 } 4438 4439 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, 4440 struct device_node *port_node) 4441 { 4442 struct mvpp2 *priv = port->priv; 4443 struct mvpp2_queue_vector *v; 4444 int i, ret; 4445 4446 switch (queue_mode) { 4447 case MVPP2_QDIST_SINGLE_MODE: 4448 port->nqvecs = priv->nthreads + 1; 4449 break; 4450 case MVPP2_QDIST_MULTI_MODE: 4451 port->nqvecs = priv->nthreads; 4452 break; 4453 } 4454 4455 for (i = 0; i < port->nqvecs; i++) { 4456 char irqname[16]; 4457 4458 v = port->qvecs + i; 4459 4460 v->port = port; 4461 v->type = MVPP2_QUEUE_VECTOR_PRIVATE; 4462 v->sw_thread_id = i; 4463 v->sw_thread_mask = BIT(i); 4464 4465 if (port->flags & MVPP2_F_DT_COMPAT) 4466 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); 4467 else 4468 snprintf(irqname, sizeof(irqname), "hif%d", i); 4469 4470 if (queue_mode == MVPP2_QDIST_MULTI_MODE) { 4471 v->first_rxq = i; 4472 v->nrxqs = 1; 4473 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && 4474 i == (port->nqvecs - 1)) { 4475 v->first_rxq = 0; 4476 v->nrxqs = port->nrxqs; 4477 v->type = MVPP2_QUEUE_VECTOR_SHARED; 4478 4479 if (port->flags & MVPP2_F_DT_COMPAT) 4480 strncpy(irqname, "rx-shared", sizeof(irqname)); 4481 } 4482 4483 if (port_node) 4484 v->irq = of_irq_get_byname(port_node, irqname); 4485 else 4486 v->irq = fwnode_irq_get(port->fwnode, i); 4487 if (v->irq <= 0) { 4488 ret = -EINVAL; 4489 goto err; 4490 } 4491 4492 netif_napi_add(port->dev, &v->napi, mvpp2_poll, 4493 NAPI_POLL_WEIGHT); 4494 } 4495 4496 return 0; 4497 4498 err: 4499 for (i = 0; i < port->nqvecs; i++) 4500 irq_dispose_mapping(port->qvecs[i].irq); 4501 return ret; 4502 } 4503 4504 static int mvpp2_queue_vectors_init(struct mvpp2_port *port, 4505 struct device_node *port_node) 4506 { 4507 if (port->has_tx_irqs) 4508 return mvpp2_multi_queue_vectors_init(port, port_node); 4509 else 4510 return mvpp2_simple_queue_vectors_init(port, port_node); 4511 } 4512 4513 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) 4514 { 4515 int i; 4516 4517 for (i = 0; i < port->nqvecs; i++) 4518 irq_dispose_mapping(port->qvecs[i].irq); 4519 } 4520 4521 /* Configure Rx queue group interrupt for this port */ 4522 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) 4523 { 4524 struct mvpp2 *priv = port->priv; 4525 u32 val; 4526 int i; 4527 4528 if (priv->hw_version == MVPP21) { 4529 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 4530 port->nrxqs); 4531 return; 4532 } 4533 4534 /* Handle the more complicated PPv2.2 case */ 4535 for (i = 0; i < port->nqvecs; i++) { 4536 struct mvpp2_queue_vector *qv = port->qvecs + i; 4537 4538 if (!qv->nrxqs) 4539 continue; 4540 4541 val = qv->sw_thread_id; 4542 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; 4543 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 4544 4545 val = qv->first_rxq; 4546 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; 4547 mvpp2_write(priv, 
MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 4548 } 4549 } 4550 4551 /* Initialize port HW */ 4552 static int mvpp2_port_init(struct mvpp2_port *port) 4553 { 4554 struct device *dev = port->dev->dev.parent; 4555 struct mvpp2 *priv = port->priv; 4556 struct mvpp2_txq_pcpu *txq_pcpu; 4557 unsigned int thread; 4558 int queue, err; 4559 4560 /* Checks for hardware constraints */ 4561 if (port->first_rxq + port->nrxqs > 4562 MVPP2_MAX_PORTS * priv->max_port_rxqs) 4563 return -EINVAL; 4564 4565 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) 4566 return -EINVAL; 4567 4568 /* Disable port */ 4569 mvpp2_egress_disable(port); 4570 mvpp2_port_disable(port); 4571 4572 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; 4573 4574 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), 4575 GFP_KERNEL); 4576 if (!port->txqs) 4577 return -ENOMEM; 4578 4579 /* Associate physical Tx queues to this port and initialize. 4580 * The mapping is predefined. 4581 */ 4582 for (queue = 0; queue < port->ntxqs; queue++) { 4583 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 4584 struct mvpp2_tx_queue *txq; 4585 4586 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 4587 if (!txq) { 4588 err = -ENOMEM; 4589 goto err_free_percpu; 4590 } 4591 4592 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); 4593 if (!txq->pcpu) { 4594 err = -ENOMEM; 4595 goto err_free_percpu; 4596 } 4597 4598 txq->id = queue_phy_id; 4599 txq->log_id = queue; 4600 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 4601 for (thread = 0; thread < priv->nthreads; thread++) { 4602 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 4603 txq_pcpu->thread = thread; 4604 } 4605 4606 port->txqs[queue] = txq; 4607 } 4608 4609 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), 4610 GFP_KERNEL); 4611 if (!port->rxqs) { 4612 err = -ENOMEM; 4613 goto err_free_percpu; 4614 } 4615 4616 /* Allocate and initialize Rx queue for this port */ 4617 for (queue = 0; queue < port->nrxqs; queue++) { 4618 struct mvpp2_rx_queue *rxq; 4619 4620 /* Map physical Rx queue to port's logical Rx queue */ 4621 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 4622 if (!rxq) { 4623 err = -ENOMEM; 4624 goto err_free_percpu; 4625 } 4626 /* Map this Rx queue to a physical queue */ 4627 rxq->id = port->first_rxq + queue; 4628 rxq->port = port->id; 4629 rxq->logic_rxq = queue; 4630 4631 port->rxqs[queue] = rxq; 4632 } 4633 4634 mvpp2_rx_irqs_setup(port); 4635 4636 /* Create Rx descriptor rings */ 4637 for (queue = 0; queue < port->nrxqs; queue++) { 4638 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4639 4640 rxq->size = port->rx_ring_size; 4641 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 4642 rxq->time_coal = MVPP2_RX_COAL_USEC; 4643 } 4644 4645 mvpp2_ingress_disable(port); 4646 4647 /* Port default configuration */ 4648 mvpp2_defaults_set(port); 4649 4650 /* Port's classifier configuration */ 4651 mvpp2_cls_oversize_rxq_set(port); 4652 mvpp2_cls_port_config(port); 4653 4654 if (mvpp22_rss_is_supported()) 4655 mvpp22_port_rss_init(port); 4656 4657 /* Provide an initial Rx packet size */ 4658 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); 4659 4660 /* Initialize pools for swf */ 4661 err = mvpp2_swf_bm_pool_init(port); 4662 if (err) 4663 goto err_free_percpu; 4664 4665 /* Clear all port stats */ 4666 mvpp2_read_stats(port); 4667 memset(port->ethtool_stats, 0, 4668 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); 4669 4670 return 0; 4671 4672 err_free_percpu: 4673 for (queue = 0; queue < port->ntxqs; queue++) { 4674 if (!port->txqs[queue]) 
4675 continue; 4676 free_percpu(port->txqs[queue]->pcpu); 4677 } 4678 return err; 4679 } 4680 4681 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, 4682 unsigned long *flags) 4683 { 4684 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", 4685 "tx-cpu3" }; 4686 int i; 4687 4688 for (i = 0; i < 5; i++) 4689 if (of_property_match_string(port_node, "interrupt-names", 4690 irqs[i]) < 0) 4691 return false; 4692 4693 *flags |= MVPP2_F_DT_COMPAT; 4694 return true; 4695 } 4696 4697 /* Checks if the port dt description has the required Tx interrupts: 4698 * - PPv2.1: there are no such interrupts. 4699 * - PPv2.2: 4700 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] 4701 * - The new ones have: "hifX" with X in [0..8] 4702 * 4703 * All those variants are supported to keep the backward compatibility. 4704 */ 4705 static bool mvpp2_port_has_irqs(struct mvpp2 *priv, 4706 struct device_node *port_node, 4707 unsigned long *flags) 4708 { 4709 char name[5]; 4710 int i; 4711 4712 /* ACPI */ 4713 if (!port_node) 4714 return true; 4715 4716 if (priv->hw_version == MVPP21) 4717 return false; 4718 4719 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) 4720 return true; 4721 4722 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 4723 snprintf(name, 5, "hif%d", i); 4724 if (of_property_match_string(port_node, "interrupt-names", 4725 name) < 0) 4726 return false; 4727 } 4728 4729 return true; 4730 } 4731 4732 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, 4733 struct fwnode_handle *fwnode, 4734 char **mac_from) 4735 { 4736 struct mvpp2_port *port = netdev_priv(dev); 4737 char hw_mac_addr[ETH_ALEN] = {0}; 4738 char fw_mac_addr[ETH_ALEN]; 4739 4740 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { 4741 *mac_from = "firmware node"; 4742 ether_addr_copy(dev->dev_addr, fw_mac_addr); 4743 return; 4744 } 4745 4746 if (priv->hw_version == MVPP21) { 4747 mvpp21_get_mac_address(port, hw_mac_addr); 4748 if (is_valid_ether_addr(hw_mac_addr)) { 4749 *mac_from = "hardware"; 4750 ether_addr_copy(dev->dev_addr, hw_mac_addr); 4751 return; 4752 } 4753 } 4754 4755 *mac_from = "random"; 4756 eth_hw_addr_random(dev); 4757 } 4758 4759 static void mvpp2_phylink_validate(struct phylink_config *config, 4760 unsigned long *supported, 4761 struct phylink_link_state *state) 4762 { 4763 struct mvpp2_port *port = container_of(config, struct mvpp2_port, 4764 phylink_config); 4765 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 4766 4767 /* Invalid combinations */ 4768 switch (state->interface) { 4769 case PHY_INTERFACE_MODE_10GBASER: 4770 case PHY_INTERFACE_MODE_XAUI: 4771 if (port->gop_id != 0) 4772 goto empty_set; 4773 break; 4774 case PHY_INTERFACE_MODE_RGMII: 4775 case PHY_INTERFACE_MODE_RGMII_ID: 4776 case PHY_INTERFACE_MODE_RGMII_RXID: 4777 case PHY_INTERFACE_MODE_RGMII_TXID: 4778 if (port->priv->hw_version == MVPP22 && port->gop_id == 0) 4779 goto empty_set; 4780 break; 4781 default: 4782 break; 4783 } 4784 4785 phylink_set(mask, Autoneg); 4786 phylink_set_port_modes(mask); 4787 phylink_set(mask, Pause); 4788 phylink_set(mask, Asym_Pause); 4789 4790 switch (state->interface) { 4791 case PHY_INTERFACE_MODE_10GBASER: 4792 case PHY_INTERFACE_MODE_XAUI: 4793 case PHY_INTERFACE_MODE_NA: 4794 if (port->gop_id == 0) { 4795 phylink_set(mask, 10000baseT_Full); 4796 phylink_set(mask, 10000baseCR_Full); 4797 phylink_set(mask, 10000baseSR_Full); 4798 phylink_set(mask, 10000baseLR_Full); 4799 phylink_set(mask, 10000baseLRM_Full); 4800 phylink_set(mask, 
10000baseER_Full); 4801 phylink_set(mask, 10000baseKR_Full); 4802 } 4803 if (state->interface != PHY_INTERFACE_MODE_NA) 4804 break; 4805 /* Fall-through */ 4806 case PHY_INTERFACE_MODE_RGMII: 4807 case PHY_INTERFACE_MODE_RGMII_ID: 4808 case PHY_INTERFACE_MODE_RGMII_RXID: 4809 case PHY_INTERFACE_MODE_RGMII_TXID: 4810 case PHY_INTERFACE_MODE_SGMII: 4811 phylink_set(mask, 10baseT_Half); 4812 phylink_set(mask, 10baseT_Full); 4813 phylink_set(mask, 100baseT_Half); 4814 phylink_set(mask, 100baseT_Full); 4815 phylink_set(mask, 1000baseT_Full); 4816 phylink_set(mask, 1000baseX_Full); 4817 if (state->interface != PHY_INTERFACE_MODE_NA) 4818 break; 4819 /* Fall-through */ 4820 case PHY_INTERFACE_MODE_1000BASEX: 4821 case PHY_INTERFACE_MODE_2500BASEX: 4822 if (port->comphy || 4823 state->interface != PHY_INTERFACE_MODE_2500BASEX) { 4824 phylink_set(mask, 1000baseT_Full); 4825 phylink_set(mask, 1000baseX_Full); 4826 } 4827 if (port->comphy || 4828 state->interface == PHY_INTERFACE_MODE_2500BASEX) { 4829 phylink_set(mask, 2500baseT_Full); 4830 phylink_set(mask, 2500baseX_Full); 4831 } 4832 break; 4833 default: 4834 goto empty_set; 4835 } 4836 4837 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 4838 bitmap_and(state->advertising, state->advertising, mask, 4839 __ETHTOOL_LINK_MODE_MASK_NBITS); 4840 4841 phylink_helper_basex_speed(state); 4842 return; 4843 4844 empty_set: 4845 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 4846 } 4847 4848 static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port, 4849 struct phylink_link_state *state) 4850 { 4851 u32 val; 4852 4853 state->speed = SPEED_10000; 4854 state->duplex = 1; 4855 state->an_complete = 1; 4856 4857 val = readl(port->base + MVPP22_XLG_STATUS); 4858 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); 4859 4860 state->pause = 0; 4861 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 4862 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) 4863 state->pause |= MLO_PAUSE_TX; 4864 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) 4865 state->pause |= MLO_PAUSE_RX; 4866 } 4867 4868 static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port, 4869 struct phylink_link_state *state) 4870 { 4871 u32 val; 4872 4873 val = readl(port->base + MVPP2_GMAC_STATUS0); 4874 4875 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); 4876 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); 4877 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); 4878 4879 switch (port->phy_interface) { 4880 case PHY_INTERFACE_MODE_1000BASEX: 4881 state->speed = SPEED_1000; 4882 break; 4883 case PHY_INTERFACE_MODE_2500BASEX: 4884 state->speed = SPEED_2500; 4885 break; 4886 default: 4887 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) 4888 state->speed = SPEED_1000; 4889 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) 4890 state->speed = SPEED_100; 4891 else 4892 state->speed = SPEED_10; 4893 } 4894 4895 state->pause = 0; 4896 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) 4897 state->pause |= MLO_PAUSE_RX; 4898 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) 4899 state->pause |= MLO_PAUSE_TX; 4900 } 4901 4902 static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config, 4903 struct phylink_link_state *state) 4904 { 4905 struct mvpp2_port *port = container_of(config, struct mvpp2_port, 4906 phylink_config); 4907 4908 if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { 4909 u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); 4910 mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 4911 4912 if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { 4913 
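/* The MAC of port 0 is in 10G (XLG) mode: report the link state from the XLG registers; every other configuration falls through to the GMAC status read below. */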
mvpp22_xlg_pcs_get_state(port, state); 4914 return; 4915 } 4916 } 4917 4918 mvpp2_gmac_pcs_get_state(port, state); 4919 } 4920 4921 static void mvpp2_mac_an_restart(struct phylink_config *config) 4922 { 4923 struct mvpp2_port *port = container_of(config, struct mvpp2_port, 4924 phylink_config); 4925 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4926 4927 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, 4928 port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4929 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, 4930 port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4931 } 4932 4933 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, 4934 const struct phylink_link_state *state) 4935 { 4936 u32 old_ctrl0, ctrl0; 4937 u32 old_ctrl4, ctrl4; 4938 4939 old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); 4940 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); 4941 4942 ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS; 4943 4944 if (state->pause & MLO_PAUSE_TX) 4945 ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; 4946 else 4947 ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; 4948 4949 if (state->pause & MLO_PAUSE_RX) 4950 ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 4951 else 4952 ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 4953 4954 ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | 4955 MVPP22_XLG_CTRL4_EN_IDLE_CHECK); 4956 ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; 4957 4958 if (old_ctrl0 != ctrl0) 4959 writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); 4960 if (old_ctrl4 != ctrl4) 4961 writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); 4962 4963 if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) { 4964 while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) & 4965 MVPP22_XLG_CTRL0_MAC_RESET_DIS)) 4966 continue; 4967 } 4968 } 4969 4970 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, 4971 const struct phylink_link_state *state) 4972 { 4973 u32 old_an, an; 4974 u32 old_ctrl0, ctrl0; 4975 u32 old_ctrl2, ctrl2; 4976 u32 old_ctrl4, ctrl4; 4977 4978 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4979 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 4980 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 4981 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 4982 4983 an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | 4984 MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | 4985 MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG | 4986 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS); 4987 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; 4988 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK | 4989 MVPP2_GMAC_PCS_ENABLE_MASK); 4990 4991 /* Configure port type */ 4992 if (phy_interface_mode_is_8023z(state->interface)) { 4993 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; 4994 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 4995 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 4996 MVPP22_CTRL4_DP_CLK_SEL | 4997 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 4998 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 4999 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; 5000 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 5001 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 5002 MVPP22_CTRL4_DP_CLK_SEL | 5003 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 5004 } else if (phy_interface_mode_is_rgmii(state->interface)) { 5005 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; 5006 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | 5007 MVPP22_CTRL4_SYNC_BYPASS_DIS | 5008 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 5009 } 5010 5011 /* Configure advertisement bits */ 5012 if 
(phylink_test(state->advertising, Pause)) 5013 an |= MVPP2_GMAC_FC_ADV_EN; 5014 if (phylink_test(state->advertising, Asym_Pause)) 5015 an |= MVPP2_GMAC_FC_ADV_ASM_EN; 5016 5017 /* Configure negotiation style */ 5018 if (!phylink_autoneg_inband(mode)) { 5019 /* Phy or fixed speed - no in-band AN, nothing to do, leave the 5020 * configured speed, duplex and flow control as-is. 5021 */ 5022 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 5023 /* SGMII in-band mode receives the speed and duplex from 5024 * the PHY. Flow control information is not received. */ 5025 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | 5026 MVPP2_GMAC_FORCE_LINK_PASS | 5027 MVPP2_GMAC_CONFIG_MII_SPEED | 5028 MVPP2_GMAC_CONFIG_GMII_SPEED | 5029 MVPP2_GMAC_CONFIG_FULL_DUPLEX); 5030 an |= MVPP2_GMAC_IN_BAND_AUTONEG | 5031 MVPP2_GMAC_AN_SPEED_EN | 5032 MVPP2_GMAC_AN_DUPLEX_EN; 5033 } else if (phy_interface_mode_is_8023z(state->interface)) { 5034 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can 5035 * they negotiate duplex: they are always operating with a fixed 5036 * speed of 1000/2500Mbps in full duplex, so force 1000/2500 5037 * speed and full duplex here. 5038 */ 5039 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; 5040 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | 5041 MVPP2_GMAC_FORCE_LINK_PASS | 5042 MVPP2_GMAC_CONFIG_MII_SPEED | 5043 MVPP2_GMAC_CONFIG_GMII_SPEED | 5044 MVPP2_GMAC_CONFIG_FULL_DUPLEX); 5045 an |= MVPP2_GMAC_IN_BAND_AUTONEG | 5046 MVPP2_GMAC_CONFIG_GMII_SPEED | 5047 MVPP2_GMAC_CONFIG_FULL_DUPLEX; 5048 5049 if (state->pause & MLO_PAUSE_AN && state->an_enabled) 5050 an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; 5051 } 5052 5053 /* Some fields of the auto-negotiation register require the port to be down when 5054 * their value is updated. 5055 */ 5056 #define MVPP2_GMAC_AN_PORT_DOWN_MASK \ 5057 (MVPP2_GMAC_IN_BAND_AUTONEG | \ 5058 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \ 5059 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \ 5060 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \ 5061 MVPP2_GMAC_AN_DUPLEX_EN) 5062 5063 if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK || 5064 (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK || 5065 (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) { 5066 /* Force link down */ 5067 old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS; 5068 old_an |= MVPP2_GMAC_FORCE_LINK_DOWN; 5069 writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5070 5071 /* Set the GMAC in a reset state - do this in a way that 5072 * ensures we clear it below. 
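 * The reset bit is set through old_ctrl2 only, so old_ctrl2 != ctrl2 is guaranteed and the ctrl2 write below clears the reset again before the loop at the end polls for it to de-assert.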
5073 */ 5074 old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; 5075 writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); 5076 } 5077 5078 if (old_ctrl0 != ctrl0) 5079 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); 5080 if (old_ctrl2 != ctrl2) 5081 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); 5082 if (old_ctrl4 != ctrl4) 5083 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); 5084 if (old_an != an) 5085 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5086 5087 if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { 5088 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 5089 MVPP2_GMAC_PORT_RESET_MASK) 5090 continue; 5091 } 5092 } 5093 5094 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, 5095 const struct phylink_link_state *state) 5096 { 5097 struct net_device *dev = to_net_dev(config->dev); 5098 struct mvpp2_port *port = netdev_priv(dev); 5099 bool change_interface = port->phy_interface != state->interface; 5100 5101 /* Check for invalid configuration */ 5102 if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { 5103 netdev_err(dev, "Invalid mode on %s\n", dev->name); 5104 return; 5105 } 5106 5107 /* Make sure the port is disabled when reconfiguring the mode */ 5108 mvpp2_port_disable(port); 5109 5110 if (port->priv->hw_version == MVPP22 && change_interface) { 5111 mvpp22_gop_mask_irq(port); 5112 5113 port->phy_interface = state->interface; 5114 5115 /* Reconfigure the serdes lanes */ 5116 phy_power_off(port->comphy); 5117 mvpp22_mode_reconfigure(port); 5118 } 5119 5120 /* mac (re)configuration */ 5121 if (mvpp2_is_xlg(state->interface)) 5122 mvpp2_xlg_config(port, mode, state); 5123 else if (phy_interface_mode_is_rgmii(state->interface) || 5124 phy_interface_mode_is_8023z(state->interface) || 5125 state->interface == PHY_INTERFACE_MODE_SGMII) 5126 mvpp2_gmac_config(port, mode, state); 5127 5128 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 5129 mvpp2_port_loopback_set(port, state); 5130 5131 if (port->priv->hw_version == MVPP22 && change_interface) 5132 mvpp22_gop_unmask_irq(port); 5133 5134 mvpp2_port_enable(port); 5135 } 5136 5137 static void mvpp2_mac_link_up(struct phylink_config *config, 5138 struct phy_device *phy, 5139 unsigned int mode, phy_interface_t interface, 5140 int speed, int duplex, 5141 bool tx_pause, bool rx_pause) 5142 { 5143 struct net_device *dev = to_net_dev(config->dev); 5144 struct mvpp2_port *port = netdev_priv(dev); 5145 u32 val; 5146 5147 if (mvpp2_is_xlg(interface)) { 5148 if (!phylink_autoneg_inband(mode)) { 5149 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 5150 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 5151 val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 5152 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 5153 } 5154 } else { 5155 if (!phylink_autoneg_inband(mode)) { 5156 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5157 val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | 5158 MVPP2_GMAC_CONFIG_MII_SPEED | 5159 MVPP2_GMAC_CONFIG_GMII_SPEED | 5160 MVPP2_GMAC_CONFIG_FULL_DUPLEX); 5161 val |= MVPP2_GMAC_FORCE_LINK_PASS; 5162 5163 if (speed == SPEED_1000 || speed == SPEED_2500) 5164 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 5165 else if (speed == SPEED_100) 5166 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 5167 5168 if (duplex == DUPLEX_FULL) 5169 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 5170 5171 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5172 } 5173 5174 /* We can always update the flow control enable bits; 5175 * these will only be effective if flow control AN 5176 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. 
5177 */ 5178 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 5179 val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); 5180 if (tx_pause) 5181 val |= MVPP22_CTRL4_TX_FC_EN; 5182 if (rx_pause) 5183 val |= MVPP22_CTRL4_RX_FC_EN; 5184 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); 5185 } 5186 5187 mvpp2_port_enable(port); 5188 5189 mvpp2_egress_enable(port); 5190 mvpp2_ingress_enable(port); 5191 netif_tx_wake_all_queues(dev); 5192 } 5193 5194 static void mvpp2_mac_link_down(struct phylink_config *config, 5195 unsigned int mode, phy_interface_t interface) 5196 { 5197 struct net_device *dev = to_net_dev(config->dev); 5198 struct mvpp2_port *port = netdev_priv(dev); 5199 u32 val; 5200 5201 if (!phylink_autoneg_inband(mode)) { 5202 if (mvpp2_is_xlg(interface)) { 5203 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 5204 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 5205 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 5206 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 5207 } else { 5208 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5209 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 5210 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 5211 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5212 } 5213 } 5214 5215 netif_tx_stop_all_queues(dev); 5216 mvpp2_egress_disable(port); 5217 mvpp2_ingress_disable(port); 5218 5219 mvpp2_port_disable(port); 5220 } 5221 5222 static const struct phylink_mac_ops mvpp2_phylink_ops = { 5223 .validate = mvpp2_phylink_validate, 5224 .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state, 5225 .mac_an_restart = mvpp2_mac_an_restart, 5226 .mac_config = mvpp2_mac_config, 5227 .mac_link_up = mvpp2_mac_link_up, 5228 .mac_link_down = mvpp2_mac_link_down, 5229 }; 5230 5231 /* Ports initialization */ 5232 static int mvpp2_port_probe(struct platform_device *pdev, 5233 struct fwnode_handle *port_fwnode, 5234 struct mvpp2 *priv) 5235 { 5236 struct phy *comphy = NULL; 5237 struct mvpp2_port *port; 5238 struct mvpp2_port_pcpu *port_pcpu; 5239 struct device_node *port_node = to_of_node(port_fwnode); 5240 netdev_features_t features; 5241 struct net_device *dev; 5242 struct phylink *phylink; 5243 char *mac_from = ""; 5244 unsigned int ntxqs, nrxqs, thread; 5245 unsigned long flags = 0; 5246 bool has_tx_irqs; 5247 u32 id; 5248 int phy_mode; 5249 int err, i; 5250 5251 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); 5252 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { 5253 dev_err(&pdev->dev, 5254 "not enough IRQs to support multi queue mode\n"); 5255 return -EINVAL; 5256 } 5257 5258 ntxqs = MVPP2_MAX_TXQ; 5259 nrxqs = mvpp2_get_nrxqs(priv); 5260 5261 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); 5262 if (!dev) 5263 return -ENOMEM; 5264 5265 phy_mode = fwnode_get_phy_mode(port_fwnode); 5266 if (phy_mode < 0) { 5267 dev_err(&pdev->dev, "incorrect phy mode\n"); 5268 err = phy_mode; 5269 goto err_free_netdev; 5270 } 5271 5272 /* 5273 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. 5274 * Existing usage of 10GBASE-KR is not correct; no backplane 5275 * negotiation is done, and this driver does not actually support 5276 * 10GBASE-KR. 
5277 */ 5278 if (phy_mode == PHY_INTERFACE_MODE_10GKR) 5279 phy_mode = PHY_INTERFACE_MODE_10GBASER; 5280 5281 if (port_node) { 5282 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); 5283 if (IS_ERR(comphy)) { 5284 if (PTR_ERR(comphy) == -EPROBE_DEFER) { 5285 err = -EPROBE_DEFER; 5286 goto err_free_netdev; 5287 } 5288 comphy = NULL; 5289 } 5290 } 5291 5292 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { 5293 err = -EINVAL; 5294 dev_err(&pdev->dev, "missing port-id value\n"); 5295 goto err_free_netdev; 5296 } 5297 5298 dev->tx_queue_len = MVPP2_MAX_TXD_MAX; 5299 dev->watchdog_timeo = 5 * HZ; 5300 dev->netdev_ops = &mvpp2_netdev_ops; 5301 dev->ethtool_ops = &mvpp2_eth_tool_ops; 5302 5303 port = netdev_priv(dev); 5304 port->dev = dev; 5305 port->fwnode = port_fwnode; 5306 port->has_phy = !!of_find_property(port_node, "phy", NULL); 5307 port->ntxqs = ntxqs; 5308 port->nrxqs = nrxqs; 5309 port->priv = priv; 5310 port->has_tx_irqs = has_tx_irqs; 5311 port->flags = flags; 5312 5313 err = mvpp2_queue_vectors_init(port, port_node); 5314 if (err) 5315 goto err_free_netdev; 5316 5317 if (port_node) 5318 port->link_irq = of_irq_get_byname(port_node, "link"); 5319 else 5320 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); 5321 if (port->link_irq == -EPROBE_DEFER) { 5322 err = -EPROBE_DEFER; 5323 goto err_deinit_qvecs; 5324 } 5325 if (port->link_irq <= 0) 5326 /* the link irq is optional */ 5327 port->link_irq = 0; 5328 5329 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) 5330 port->flags |= MVPP2_F_LOOPBACK; 5331 5332 port->id = id; 5333 if (priv->hw_version == MVPP21) 5334 port->first_rxq = port->id * port->nrxqs; 5335 else 5336 port->first_rxq = port->id * priv->max_port_rxqs; 5337 5338 port->of_node = port_node; 5339 port->phy_interface = phy_mode; 5340 port->comphy = comphy; 5341 5342 if (priv->hw_version == MVPP21) { 5343 port->base = devm_platform_ioremap_resource(pdev, 2 + id); 5344 if (IS_ERR(port->base)) { 5345 err = PTR_ERR(port->base); 5346 goto err_free_irq; 5347 } 5348 5349 port->stats_base = port->priv->lms_base + 5350 MVPP21_MIB_COUNTERS_OFFSET + 5351 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; 5352 } else { 5353 if (fwnode_property_read_u32(port_fwnode, "gop-port-id", 5354 &port->gop_id)) { 5355 err = -EINVAL; 5356 dev_err(&pdev->dev, "missing gop-port-id value\n"); 5357 goto err_deinit_qvecs; 5358 } 5359 5360 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); 5361 port->stats_base = port->priv->iface_base + 5362 MVPP22_MIB_COUNTERS_OFFSET + 5363 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; 5364 } 5365 5366 /* Alloc per-cpu and ethtool stats */ 5367 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); 5368 if (!port->stats) { 5369 err = -ENOMEM; 5370 goto err_free_irq; 5371 } 5372 5373 port->ethtool_stats = devm_kcalloc(&pdev->dev, 5374 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), 5375 sizeof(u64), GFP_KERNEL); 5376 if (!port->ethtool_stats) { 5377 err = -ENOMEM; 5378 goto err_free_stats; 5379 } 5380 5381 mutex_init(&port->gather_stats_lock); 5382 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); 5383 5384 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); 5385 5386 port->tx_ring_size = MVPP2_MAX_TXD_DFLT; 5387 port->rx_ring_size = MVPP2_MAX_RXD_DFLT; 5388 SET_NETDEV_DEV(dev, &pdev->dev); 5389 5390 err = mvpp2_port_init(port); 5391 if (err < 0) { 5392 dev_err(&pdev->dev, "failed to init port %d\n", id); 5393 goto err_free_stats; 5394 } 5395 5396 mvpp2_port_periodic_xon_disable(port); 5397 5398 
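/* Hold the port MAC and PCS in reset for now; they are released again when the interface is brought up and the port is (re)configured. */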
mvpp2_mac_reset_assert(port); 5399 mvpp22_pcs_reset_assert(port); 5400 5401 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); 5402 if (!port->pcpu) { 5403 err = -ENOMEM; 5404 goto err_free_txq_pcpu; 5405 } 5406 5407 if (!port->has_tx_irqs) { 5408 for (thread = 0; thread < priv->nthreads; thread++) { 5409 port_pcpu = per_cpu_ptr(port->pcpu, thread); 5410 5411 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, 5412 HRTIMER_MODE_REL_PINNED_SOFT); 5413 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; 5414 port_pcpu->timer_scheduled = false; 5415 port_pcpu->dev = dev; 5416 } 5417 } 5418 5419 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5420 NETIF_F_TSO; 5421 dev->features = features | NETIF_F_RXCSUM; 5422 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | 5423 NETIF_F_HW_VLAN_CTAG_FILTER; 5424 5425 if (mvpp22_rss_is_supported()) { 5426 dev->hw_features |= NETIF_F_RXHASH; 5427 dev->features |= NETIF_F_NTUPLE; 5428 } 5429 5430 if (!port->priv->percpu_pools) 5431 mvpp2_set_hw_csum(port, port->pool_long->id); 5432 5433 dev->vlan_features |= features; 5434 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; 5435 dev->priv_flags |= IFF_UNICAST_FLT; 5436 5437 /* MTU range: 68 - 9704 */ 5438 dev->min_mtu = ETH_MIN_MTU; 5439 /* 9704 == 9728 - 20 and rounding to 8 */ 5440 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 5441 dev->dev.of_node = port_node; 5442 5443 /* Phylink isn't used w/ ACPI as of now */ 5444 if (port_node) { 5445 port->phylink_config.dev = &dev->dev; 5446 port->phylink_config.type = PHYLINK_NETDEV; 5447 5448 phylink = phylink_create(&port->phylink_config, port_fwnode, 5449 phy_mode, &mvpp2_phylink_ops); 5450 if (IS_ERR(phylink)) { 5451 err = PTR_ERR(phylink); 5452 goto err_free_port_pcpu; 5453 } 5454 port->phylink = phylink; 5455 } else { 5456 port->phylink = NULL; 5457 } 5458 5459 /* Cycle the comphy to power it down, saving 270mW per port - 5460 * don't worry about an error powering it up. When the comphy 5461 * driver does this, we can remove this code. 
5462 */ 5463 if (port->comphy) { 5464 err = mvpp22_comphy_init(port); 5465 if (err == 0) 5466 phy_power_off(port->comphy); 5467 } 5468 5469 err = register_netdev(dev); 5470 if (err < 0) { 5471 dev_err(&pdev->dev, "failed to register netdev\n"); 5472 goto err_phylink; 5473 } 5474 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 5475 5476 priv->port_list[priv->port_count++] = port; 5477 5478 return 0; 5479 5480 err_phylink: 5481 if (port->phylink) 5482 phylink_destroy(port->phylink); 5483 err_free_port_pcpu: 5484 free_percpu(port->pcpu); 5485 err_free_txq_pcpu: 5486 for (i = 0; i < port->ntxqs; i++) 5487 free_percpu(port->txqs[i]->pcpu); 5488 err_free_stats: 5489 free_percpu(port->stats); 5490 err_free_irq: 5491 if (port->link_irq) 5492 irq_dispose_mapping(port->link_irq); 5493 err_deinit_qvecs: 5494 mvpp2_queue_vectors_deinit(port); 5495 err_free_netdev: 5496 free_netdev(dev); 5497 return err; 5498 } 5499 5500 /* Ports removal routine */ 5501 static void mvpp2_port_remove(struct mvpp2_port *port) 5502 { 5503 int i; 5504 5505 unregister_netdev(port->dev); 5506 if (port->phylink) 5507 phylink_destroy(port->phylink); 5508 free_percpu(port->pcpu); 5509 free_percpu(port->stats); 5510 for (i = 0; i < port->ntxqs; i++) 5511 free_percpu(port->txqs[i]->pcpu); 5512 mvpp2_queue_vectors_deinit(port); 5513 if (port->link_irq) 5514 irq_dispose_mapping(port->link_irq); 5515 free_netdev(port->dev); 5516 } 5517 5518 /* Initialize decoding windows */ 5519 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 5520 struct mvpp2 *priv) 5521 { 5522 u32 win_enable; 5523 int i; 5524 5525 for (i = 0; i < 6; i++) { 5526 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 5527 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 5528 5529 if (i < 4) 5530 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 5531 } 5532 5533 win_enable = 0; 5534 5535 for (i = 0; i < dram->num_cs; i++) { 5536 const struct mbus_dram_window *cs = dram->cs + i; 5537 5538 mvpp2_write(priv, MVPP2_WIN_BASE(i), 5539 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 5540 dram->mbus_dram_target_id); 5541 5542 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 5543 (cs->size - 1) & 0xffff0000); 5544 5545 win_enable |= (1 << i); 5546 } 5547 5548 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 5549 } 5550 5551 /* Initialize Rx FIFO's */ 5552 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 5553 { 5554 int port; 5555 5556 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 5557 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 5558 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); 5559 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 5560 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); 5561 } 5562 5563 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 5564 MVPP2_RX_FIFO_PORT_MIN_PKT); 5565 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 5566 } 5567 5568 static void mvpp22_rx_fifo_init(struct mvpp2 *priv) 5569 { 5570 int port; 5571 5572 /* The FIFO size parameters are set depending on the maximum speed a 5573 * given port can handle: 5574 * - Port 0: 10Gbps 5575 * - Port 1: 2.5Gbps 5576 * - Ports 2 and 3: 1Gbps 5577 */ 5578 5579 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), 5580 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); 5581 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), 5582 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); 5583 5584 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), 5585 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); 5586 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), 5587 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); 5588 5589 for (port = 2; port < MVPP2_MAX_PORTS; port++) { 5590 
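/* The remaining ports (2 and 3) are limited to 1Gbps, so the smallest 4kB data/attribute FIFO setting is sufficient for them. */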
mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 5591 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); 5592 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 5593 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); 5594 } 5595 5596 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 5597 MVPP2_RX_FIFO_PORT_MIN_PKT); 5598 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 5599 } 5600 5601 /* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G 5602 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, 5603 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB. 5604 */ 5605 static void mvpp22_tx_fifo_init(struct mvpp2 *priv) 5606 { 5607 int port, size, thrs; 5608 5609 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 5610 if (port == 0) { 5611 size = MVPP22_TX_FIFO_DATA_SIZE_10KB; 5612 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; 5613 } else { 5614 size = MVPP22_TX_FIFO_DATA_SIZE_3KB; 5615 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; 5616 } 5617 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); 5618 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); 5619 } 5620 } 5621 5622 static void mvpp2_axi_init(struct mvpp2 *priv) 5623 { 5624 u32 val, rdval, wrval; 5625 5626 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 5627 5628 /* AXI Bridge Configuration */ 5629 5630 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 5631 << MVPP22_AXI_ATTR_CACHE_OFFS; 5632 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5633 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 5634 5635 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 5636 << MVPP22_AXI_ATTR_CACHE_OFFS; 5637 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5638 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 5639 5640 /* BM */ 5641 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 5642 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 5643 5644 /* Descriptors */ 5645 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 5646 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 5647 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 5648 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 5649 5650 /* Buffer Data */ 5651 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 5652 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 5653 5654 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 5655 << MVPP22_AXI_CODE_CACHE_OFFS; 5656 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 5657 << MVPP22_AXI_CODE_DOMAIN_OFFS; 5658 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 5659 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 5660 5661 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 5662 << MVPP22_AXI_CODE_CACHE_OFFS; 5663 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5664 << MVPP22_AXI_CODE_DOMAIN_OFFS; 5665 5666 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 5667 5668 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 5669 << MVPP22_AXI_CODE_CACHE_OFFS; 5670 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5671 << MVPP22_AXI_CODE_DOMAIN_OFFS; 5672 5673 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 5674 } 5675 5676 /* Initialize network controller common part HW */ 5677 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) 5678 { 5679 const struct mbus_dram_target_info *dram_target_info; 5680 int err, i; 5681 u32 val; 5682 5683 /* MBUS windows configuration */ 5684 dram_target_info = mv_mbus_dram_info(); 5685 if (dram_target_info) 5686 mvpp2_conf_mbus_windows(dram_target_info, priv); 5687 5688 if (priv->hw_version == MVPP22) 5689 mvpp2_axi_init(priv); 5690 5691 /* Disable HW PHY polling */ 5692 if (priv->hw_version == MVPP21) { 5693 val = readl(priv->lms_base +
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* FIFO init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

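/* Probe routine: map the controller registers, enable the clocks, initialize
 * the common hardware, then create one network device per port described in
 * the DT/ACPI tables.
 */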
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* Multi-queue mode isn't supported on PPv2.1, fall back to single
	 * mode.
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI tables, it can already appear as
			 * 'in-use' in the OS. Because it is overlapped by
			 * the second region of the network controller, make
			 * sure it is released before requesting it again.
			 * The mvpp2 driver takes care to avoid concurrent
			 * access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version == MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

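	/* CPUs in excess of the number of available HW threads wrap around
	 * and share an address space with another CPU; record those shared
	 * threads in lock_map so that accesses through them can be
	 * protected by a lock.
	 */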
	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Hence, use a workqueue to fill 64-bit counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

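/* Driver removal routine: tear down the ports, the statistics workqueue, the
 * BM pools and the aggregated TXQs, then release the clocks when the device
 * was probed from DT.
 */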
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");