// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
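
/* Illustrative sketch only, not part of the driver: a hypothetical caller
 * would create an Rx page pool and check the result like this (the buffer
 * count and length below are made-up values):
 *
 *	struct page_pool *pp;
 *
 *	pp = mvpp2_create_page_pool(dev, 2048, 1536, DMA_FROM_DEVICE);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */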

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}
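
/* A worked example of the address split done in mvpp2_txdesc_dma_addr_set()
 * above, assuming MVPP2_TX_DESC_ALIGN is a low-bits mask such as 0x3f (its
 * value lives in mvpp2.h): for dma_addr = 0x12345678,
 *
 *	addr   = 0x12345678 & ~0x3f = 0x12345640  (written as the buffer addr)
 *	offset = 0x12345678 &  0x3f = 0x38        (written to packet_offset)
 *
 * so the hardware sees an aligned buffer address plus a byte offset into it.
 */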

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;

	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to the nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need
	 * 16 bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
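
/* A worked example of the rounding in mvpp2_bm_pool_bufsize_set() above,
 * assuming MVPP2_POOL_BUF_SIZE_OFFSET is 5, its usual value in mvpp2.h
 * (i.e. the hardware keeps the size with the low 5 bits clear, in 32-byte
 * granularity): buf_size = 1568 is already a multiple of 32 and is written
 * as-is, while buf_size = 1550 would be rounded up to ALIGN(1550, 32) = 1568.
 */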

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Enable the PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask all BM interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear the BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}
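
/* Note on the per-CPU pool numbering used above and in
 * mvpp2_swf_bm_pool_init_percpu() below: when percpu_pools is set, pool i
 * with i in [0, nrxqs) is a short pool serving rxq i, and pool i with i in
 * [nrxqs, 2 * nrxqs) is the matching long pool, which is why the page pool
 * parameters are picked with pn = i / (poolnum / 2).
 */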
"per-cpu" : "shared"); 656 657 for (i = 0; i < poolnum; i++) { 658 /* Mask BM all interrupts */ 659 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 660 /* Clear BM cause register */ 661 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 662 } 663 664 /* Allocate and initialize BM pools */ 665 priv->bm_pools = devm_kcalloc(dev, poolnum, 666 sizeof(*priv->bm_pools), GFP_KERNEL); 667 if (!priv->bm_pools) 668 return -ENOMEM; 669 670 if (priv->hw_version == MVPP23) 671 mvpp23_bm_set_8pool_mode(priv); 672 673 err = mvpp2_bm_pools_init(dev, priv); 674 if (err < 0) 675 return err; 676 return 0; 677 } 678 679 static void mvpp2_setup_bm_pool(void) 680 { 681 /* Short pool */ 682 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; 683 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; 684 685 /* Long pool */ 686 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; 687 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; 688 689 /* Jumbo pool */ 690 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; 691 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; 692 } 693 694 /* Attach long pool to rxq */ 695 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 696 int lrxq, int long_pool) 697 { 698 u32 val, mask; 699 int prxq; 700 701 /* Get queue physical ID */ 702 prxq = port->rxqs[lrxq]->id; 703 704 if (port->priv->hw_version == MVPP21) 705 mask = MVPP21_RXQ_POOL_LONG_MASK; 706 else 707 mask = MVPP22_RXQ_POOL_LONG_MASK; 708 709 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 710 val &= ~mask; 711 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; 712 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 713 } 714 715 /* Attach short pool to rxq */ 716 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, 717 int lrxq, int short_pool) 718 { 719 u32 val, mask; 720 int prxq; 721 722 /* Get queue physical ID */ 723 prxq = port->rxqs[lrxq]->id; 724 725 if (port->priv->hw_version == MVPP21) 726 mask = MVPP21_RXQ_POOL_SHORT_MASK; 727 else 728 mask = MVPP22_RXQ_POOL_SHORT_MASK; 729 730 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 731 val &= ~mask; 732 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; 733 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 734 } 735 736 static void *mvpp2_buf_alloc(struct mvpp2_port *port, 737 struct mvpp2_bm_pool *bm_pool, 738 struct page_pool *page_pool, 739 dma_addr_t *buf_dma_addr, 740 phys_addr_t *buf_phys_addr, 741 gfp_t gfp_mask) 742 { 743 dma_addr_t dma_addr; 744 struct page *page; 745 void *data; 746 747 data = mvpp2_frag_alloc(bm_pool, page_pool); 748 if (!data) 749 return NULL; 750 751 if (page_pool) { 752 page = (struct page *)data; 753 dma_addr = page_pool_get_dma_addr(page); 754 data = page_to_virt(page); 755 } else { 756 dma_addr = dma_map_single(port->dev->dev.parent, data, 757 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), 758 DMA_FROM_DEVICE); 759 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 760 mvpp2_frag_free(bm_pool, NULL, data); 761 return NULL; 762 } 763 } 764 *buf_dma_addr = dma_addr; 765 *buf_phys_addr = virt_to_phys(data); 766 767 return data; 768 } 769 770 /* Routine enable flow control for RXQs condition */ 771 static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) 772 { 773 int val, cm3_state, host_id, q; 774 int fq = port->first_rxq; 775 unsigned long flags; 776 777 spin_lock_irqsave(&port->priv->mss_spinlock, flags); 778 779 /* Remove Flow control enable bit to prevent race between FW and Kernel 780 * If Flow 
control was enabled, it would be re-enabled. 781 */ 782 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); 783 cm3_state = (val & FLOW_CONTROL_ENABLE_BIT); 784 val &= ~FLOW_CONTROL_ENABLE_BIT; 785 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); 786 787 /* Set same Flow control for all RXQs */ 788 for (q = 0; q < port->nrxqs; q++) { 789 /* Set stop and start Flow control RXQ thresholds */ 790 val = MSS_THRESHOLD_START; 791 val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS); 792 mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val); 793 794 val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); 795 /* Set RXQ port ID */ 796 val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq)); 797 val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq)); 798 val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) 799 + MSS_RXQ_ASS_HOSTID_OFFS)); 800 801 /* Calculate RXQ host ID: 802 * In Single queue mode: Host ID equal to Host ID used for 803 * shared RX interrupt 804 * In Multi queue mode: Host ID equal to number of 805 * RXQ ID / number of CoS queues 806 * In Single resource mode: Host ID always equal to 0 807 */ 808 if (queue_mode == MVPP2_QDIST_SINGLE_MODE) 809 host_id = port->nqvecs; 810 else if (queue_mode == MVPP2_QDIST_MULTI_MODE) 811 host_id = q; 812 else 813 host_id = 0; 814 815 /* Set RXQ host ID */ 816 val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq) 817 + MSS_RXQ_ASS_HOSTID_OFFS)); 818 819 mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val); 820 } 821 822 /* Notify Firmware that Flow control config space ready for update */ 823 val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); 824 val |= FLOW_CONTROL_UPDATE_COMMAND_BIT; 825 val |= cm3_state; 826 mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); 827 828 spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); 829 } 830 831 /* Routine disable flow control for RXQs condition */ 832 static void mvpp2_rxq_disable_fc(struct mvpp2_port *port) 833 { 834 int val, cm3_state, q; 835 unsigned long flags; 836 int fq = port->first_rxq; 837 838 spin_lock_irqsave(&port->priv->mss_spinlock, flags); 839 840 /* Remove Flow control enable bit to prevent race between FW and Kernel 841 * If Flow control was enabled, it would be re-enabled. 

/* Enable/disable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if BM pool should be enabled/disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify firmware that the Flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
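
/* The three flow-control routines above share one CM3 firmware handshake,
 * summarized here for clarity: read MSS_FC_COM_REG and remember whether
 * FLOW_CONTROL_ENABLE_BIT was set, clear it so the firmware stops applying
 * the configuration, rewrite the per-queue/per-pool MSS registers, then set
 * FLOW_CONTROL_UPDATE_COMMAND_BIT together with the saved enable state so
 * the firmware picks up the new configuration. All of it runs under
 * mss_spinlock.
 */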

/* Enable/disable flow control for the BM pools of all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. In this stage global
	 * flow control enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if Firmware running and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor. Instead
	 * of storing the virtual address, we store the physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools\n");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool >= mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has only 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
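
/* A worked example of the long-pool selection above, using the 1518-byte
 * threshold described in the comment: an MTU of 1500 yields a packet size
 * that still fits MVPP2_BM_LONG_PKT_SIZE, so new_long_pool stays
 * MVPP2_BM_LONG, while a jumbo MTU of 9000 exceeds the threshold and
 * switches the port's HW long pool to the SW jumbo pool (MVPP2_BM_JUMBO),
 * which in turn disables hardware checksum offload on ports other than 0.
 */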

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT2_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set the Flow Control timer x100 faster than the pause quanta to ensure
 * that the link partner won't send traffic if the port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}
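
/* A sketch of the timer value computed in mvpp22_gop_fca_set_periodic_timer()
 * above, assuming the mvpp2.h values FC_CLK_DIVIDER = 100 and
 * FC_QUANTA = 0xFFFF, and a core clock of tclk = 333333333 Hz:
 *
 *	timer = (333333333 / (1000000 * 100)) * 0xFFFF
 *	      = 3 * 65535 = 196605 clock cycles
 *
 * i.e. the periodic XOFF refresh fires well within one pause-quanta period.
 */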

static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port,
			      phy_interface_t interface)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. Depending on the register being read, the index can
 * represent various resources: a hit counter for some classification
 * tables, or a counter specific to an rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
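
/* Illustrative use of mvpp2_read_index() above (it mirrors what
 * mvpp2_read_stats() does later in this file): select the Tx counter
 * window of txq q on a port, then read one of its counters:
 *
 *	u32 enq = mvpp2_read_index(port->priv,
 *				   MVPP22_CTRS_TX_CTR(port->id, q),
 *				   MVPP2_TX_DESC_ENQ_CTR);
 */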
1876 */ 1877 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { 1878 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, 1879 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, 1880 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, 1881 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, 1882 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, 1883 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, 1884 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, 1885 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, 1886 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, 1887 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, 1888 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, 1889 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, 1890 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, 1891 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, 1892 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, 1893 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, 1894 { MVPP2_MIB_FC_SENT, "fc_sent" }, 1895 { MVPP2_MIB_FC_RCVD, "fc_received" }, 1896 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, 1897 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, 1898 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, 1899 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, 1900 { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, 1901 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, 1902 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, 1903 { MVPP2_MIB_COLLISION, "collision" }, 1904 { MVPP2_MIB_LATE_COLLISION, "late_collision" }, 1905 }; 1906 1907 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { 1908 { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, 1909 { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, 1910 }; 1911 1912 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { 1913 { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, 1914 { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, 1915 { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" }, 1916 { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, 1917 { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, 1918 { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, 1919 { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, 1920 { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, 1921 { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, 1922 }; 1923 1924 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { 1925 { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, 1926 { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, 1927 { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, 1928 { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, 1929 }; 1930 1931 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { 1932 { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, 1933 { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, 1934 { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, 1935 { ETHTOOL_XDP_TX, "rx_xdp_tx", }, 1936 { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, 1937 { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, 1938 { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, 1939 }; 1940 1941 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ 1942 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ 1943 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) *
(ntxqs)) + \ 1944 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ 1945 ARRAY_SIZE(mvpp2_ethtool_xdp)) 1946 1947 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, 1948 u8 *data) 1949 { 1950 struct mvpp2_port *port = netdev_priv(netdev); 1951 int i, q; 1952 1953 if (sset != ETH_SS_STATS) 1954 return; 1955 1956 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { 1957 strscpy(data, mvpp2_ethtool_mib_regs[i].string, 1958 ETH_GSTRING_LEN); 1959 data += ETH_GSTRING_LEN; 1960 } 1961 1962 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { 1963 strscpy(data, mvpp2_ethtool_port_regs[i].string, 1964 ETH_GSTRING_LEN); 1965 data += ETH_GSTRING_LEN; 1966 } 1967 1968 for (q = 0; q < port->ntxqs; q++) { 1969 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { 1970 snprintf(data, ETH_GSTRING_LEN, 1971 mvpp2_ethtool_txq_regs[i].string, q); 1972 data += ETH_GSTRING_LEN; 1973 } 1974 } 1975 1976 for (q = 0; q < port->nrxqs; q++) { 1977 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { 1978 snprintf(data, ETH_GSTRING_LEN, 1979 mvpp2_ethtool_rxq_regs[i].string, 1980 q); 1981 data += ETH_GSTRING_LEN; 1982 } 1983 } 1984 1985 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { 1986 strscpy(data, mvpp2_ethtool_xdp[i].string, 1987 ETH_GSTRING_LEN); 1988 data += ETH_GSTRING_LEN; 1989 } 1990 } 1991 1992 static void 1993 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) 1994 { 1995 unsigned int start; 1996 unsigned int cpu; 1997 1998 /* Gather XDP Statistics */ 1999 for_each_possible_cpu(cpu) { 2000 struct mvpp2_pcpu_stats *cpu_stats; 2001 u64 xdp_redirect; 2002 u64 xdp_pass; 2003 u64 xdp_drop; 2004 u64 xdp_xmit; 2005 u64 xdp_xmit_err; 2006 u64 xdp_tx; 2007 u64 xdp_tx_err; 2008 2009 cpu_stats = per_cpu_ptr(port->stats, cpu); 2010 do { 2011 start = u64_stats_fetch_begin(&cpu_stats->syncp); 2012 xdp_redirect = cpu_stats->xdp_redirect; 2013 xdp_pass = cpu_stats->xdp_pass; 2014 xdp_drop = cpu_stats->xdp_drop; 2015 xdp_xmit = cpu_stats->xdp_xmit; 2016 xdp_xmit_err = cpu_stats->xdp_xmit_err; 2017 xdp_tx = cpu_stats->xdp_tx; 2018 xdp_tx_err = cpu_stats->xdp_tx_err; 2019 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 2020 2021 xdp_stats->xdp_redirect += xdp_redirect; 2022 xdp_stats->xdp_pass += xdp_pass; 2023 xdp_stats->xdp_drop += xdp_drop; 2024 xdp_stats->xdp_xmit += xdp_xmit; 2025 xdp_stats->xdp_xmit_err += xdp_xmit_err; 2026 xdp_stats->xdp_tx += xdp_tx; 2027 xdp_stats->xdp_tx_err += xdp_tx_err; 2028 } 2029 } 2030 2031 static void mvpp2_read_stats(struct mvpp2_port *port) 2032 { 2033 struct mvpp2_pcpu_stats xdp_stats = {}; 2034 const struct mvpp2_ethtool_counter *s; 2035 u64 *pstats; 2036 int i, q; 2037 2038 pstats = port->ethtool_stats; 2039 2040 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) 2041 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); 2042 2043 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) 2044 *pstats++ += mvpp2_read(port->priv, 2045 mvpp2_ethtool_port_regs[i].offset + 2046 4 * port->id); 2047 2048 for (q = 0; q < port->ntxqs; q++) 2049 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) 2050 *pstats++ += mvpp2_read_index(port->priv, 2051 MVPP22_CTRS_TX_CTR(port->id, q), 2052 mvpp2_ethtool_txq_regs[i].offset); 2053 2054 /* Rxqs are numbered from 0 from the user standpoint, but not from the 2055 * driver's. We need to add the port->first_rxq offset. 
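 * For example, if port->first_rxq is 32, the queue exposed to the user
 * as rxq 0 is physical rxq 32, and that is the index that must be
 * written to MVPP2_CTRS_IDX to read its counters.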
2056 */ 2057 for (q = 0; q < port->nrxqs; q++) 2058 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) 2059 *pstats++ += mvpp2_read_index(port->priv, 2060 port->first_rxq + q, 2061 mvpp2_ethtool_rxq_regs[i].offset); 2062 2063 /* Gather XDP Statistics */ 2064 mvpp2_get_xdp_stats(port, &xdp_stats); 2065 2066 for (i = 0, s = mvpp2_ethtool_xdp; 2067 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); 2068 s++, i++) { 2069 switch (s->offset) { 2070 case ETHTOOL_XDP_REDIRECT: 2071 *pstats++ = xdp_stats.xdp_redirect; 2072 break; 2073 case ETHTOOL_XDP_PASS: 2074 *pstats++ = xdp_stats.xdp_pass; 2075 break; 2076 case ETHTOOL_XDP_DROP: 2077 *pstats++ = xdp_stats.xdp_drop; 2078 break; 2079 case ETHTOOL_XDP_TX: 2080 *pstats++ = xdp_stats.xdp_tx; 2081 break; 2082 case ETHTOOL_XDP_TX_ERR: 2083 *pstats++ = xdp_stats.xdp_tx_err; 2084 break; 2085 case ETHTOOL_XDP_XMIT: 2086 *pstats++ = xdp_stats.xdp_xmit; 2087 break; 2088 case ETHTOOL_XDP_XMIT_ERR: 2089 *pstats++ = xdp_stats.xdp_xmit_err; 2090 break; 2091 } 2092 } 2093 } 2094 2095 static void mvpp2_gather_hw_statistics(struct work_struct *work) 2096 { 2097 struct delayed_work *del_work = to_delayed_work(work); 2098 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, 2099 stats_work); 2100 2101 mutex_lock(&port->gather_stats_lock); 2102 2103 mvpp2_read_stats(port); 2104 2105 /* No need to read again the counters right after this function if it 2106 * was called asynchronously by the user (ie. use of ethtool). 2107 */ 2108 cancel_delayed_work(&port->stats_work); 2109 queue_delayed_work(port->priv->stats_queue, &port->stats_work, 2110 MVPP2_MIB_COUNTERS_STATS_DELAY); 2111 2112 mutex_unlock(&port->gather_stats_lock); 2113 } 2114 2115 static void mvpp2_ethtool_get_stats(struct net_device *dev, 2116 struct ethtool_stats *stats, u64 *data) 2117 { 2118 struct mvpp2_port *port = netdev_priv(dev); 2119 2120 /* Update statistics for the given port, then take the lock to avoid 2121 * concurrent accesses on the ethtool_stats structure during its copy. 
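 * Calling mvpp2_gather_hw_statistics() synchronously also rearms the
 * periodic work, so a user-triggered read simply pushes the next
 * scheduled read further out instead of racing with it.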
2122 */ 2123 mvpp2_gather_hw_statistics(&port->stats_work.work); 2124 2125 mutex_lock(&port->gather_stats_lock); 2126 memcpy(data, port->ethtool_stats, 2127 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); 2128 mutex_unlock(&port->gather_stats_lock); 2129 } 2130 2131 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) 2132 { 2133 struct mvpp2_port *port = netdev_priv(dev); 2134 2135 if (sset == ETH_SS_STATS) 2136 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); 2137 2138 return -EOPNOTSUPP; 2139 } 2140 2141 static void mvpp2_mac_reset_assert(struct mvpp2_port *port) 2142 { 2143 u32 val; 2144 2145 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | 2146 MVPP2_GMAC_PORT_RESET_MASK; 2147 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2148 2149 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { 2150 val = readl(port->base + MVPP22_XLG_CTRL0_REG) & 2151 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; 2152 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 2153 } 2154 } 2155 2156 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) 2157 { 2158 struct mvpp2 *priv = port->priv; 2159 void __iomem *mpcs, *xpcs; 2160 u32 val; 2161 2162 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2163 return; 2164 2165 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2166 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2167 2168 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2169 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); 2170 val |= MVPP22_MPCS_CLK_RESET_DIV_SET; 2171 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2172 2173 val = readl(xpcs + MVPP22_XPCS_CFG0); 2174 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2175 } 2176 2177 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, 2178 phy_interface_t interface) 2179 { 2180 struct mvpp2 *priv = port->priv; 2181 void __iomem *mpcs, *xpcs; 2182 u32 val; 2183 2184 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2185 return; 2186 2187 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2188 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2189 2190 switch (interface) { 2191 case PHY_INTERFACE_MODE_5GBASER: 2192 case PHY_INTERFACE_MODE_10GBASER: 2193 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2194 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | 2195 MAC_CLK_RESET_SD_TX; 2196 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; 2197 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2198 break; 2199 case PHY_INTERFACE_MODE_XAUI: 2200 case PHY_INTERFACE_MODE_RXAUI: 2201 val = readl(xpcs + MVPP22_XPCS_CFG0); 2202 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2203 break; 2204 default: 2205 break; 2206 } 2207 } 2208 2209 /* Change maximum receive size of the port */ 2210 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) 2211 { 2212 u32 val; 2213 2214 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2215 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 2216 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2217 MVPP2_GMAC_MAX_RX_SIZE_OFFS); 2218 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2219 } 2220 2221 /* Change maximum receive size of the port */ 2222 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) 2223 { 2224 u32 val; 2225 2226 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 2227 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; 2228 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2229 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; 2230 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 2231 } 2232 
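/* The two helpers above encode the limit identically: the hardware field
 * counts 2-byte units of the frame once the Marvell header is stripped.
 * A minimal illustrative sketch of that encoding (mvpp2_rx_size_to_field
 * is hypothetical, not part of the driver; it assumes only MVPP2_MH_SIZE
 * from above):
 */
static inline u32 mvpp2_rx_size_to_field(int pkt_size, int offs)
{
	/* e.g. pkt_size = 1518 gives (1518 - 2) / 2 = 758 two-byte units */
	return ((u32)((pkt_size - MVPP2_MH_SIZE) / 2)) << offs;
}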
2233 /* Set defaults to the MVPP2 port */ 2234 static void mvpp2_defaults_set(struct mvpp2_port *port) 2235 { 2236 int tx_port_num, val, queue, lrxq; 2237 2238 if (port->priv->hw_version == MVPP21) { 2239 /* Update TX FIFO MIN Threshold */ 2240 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2241 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 2242 /* Min. TX threshold must be less than minimal packet length */ 2243 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 2244 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2245 } 2246 2247 /* Disable Legacy WRR, Disable EJP, Release from reset */ 2248 tx_port_num = mvpp2_egress_port(port); 2249 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 2250 tx_port_num); 2251 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 2252 2253 /* Set TXQ scheduling to Round-Robin */ 2254 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); 2255 2256 /* Close bandwidth for all queues */ 2257 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) 2258 mvpp2_write(port->priv, 2259 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); 2260 2261 /* Set refill period to 1 usec, refill tokens 2262 * and bucket size to maximum 2263 */ 2264 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 2265 port->priv->tclk / USEC_PER_SEC); 2266 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 2267 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 2268 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 2269 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 2270 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 2271 val = MVPP2_TXP_TOKEN_SIZE_MAX; 2272 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2273 2274 /* Set MaximumLowLatencyPacketSize value to 256 */ 2275 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 2276 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 2277 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 2278 2279 /* Enable Rx cache snoop */ 2280 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2281 queue = port->rxqs[lrxq]->id; 2282 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2283 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 2284 MVPP2_SNOOP_BUF_HDR_MASK; 2285 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2286 } 2287 2288 /* By default, mask all interrupts on all present CPUs */ 2289 mvpp2_interrupts_disable(port); 2290 } 2291 2292 /* Enable/disable receiving packets */ 2293 static void mvpp2_ingress_enable(struct mvpp2_port *port) 2294 { 2295 u32 val; 2296 int lrxq, queue; 2297 2298 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2299 queue = port->rxqs[lrxq]->id; 2300 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2301 val &= ~MVPP2_RXQ_DISABLE_MASK; 2302 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2303 } 2304 } 2305 2306 static void mvpp2_ingress_disable(struct mvpp2_port *port) 2307 { 2308 u32 val; 2309 int lrxq, queue; 2310 2311 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2312 queue = port->rxqs[lrxq]->id; 2313 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2314 val |= MVPP2_RXQ_DISABLE_MASK; 2315 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2316 } 2317 } 2318 2319 /* Enable transmit via physical egress queue 2320 * - HW starts taking descriptors from DRAM 2321 */ 2322 static void mvpp2_egress_enable(struct mvpp2_port *port) 2323 { 2324 u32 qmap; 2325 int queue; 2326 int tx_port_num = mvpp2_egress_port(port); 2327 2328 /* Enable all initialized TXs.
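 * Each bit of qmap selects one logical TXQ: only queues whose
 * descriptor array was actually allocated are started, so e.g. four
 * initialized queues give qmap = 0xf.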
*/ 2329 qmap = 0; 2330 for (queue = 0; queue < port->ntxqs; queue++) { 2331 struct mvpp2_tx_queue *txq = port->txqs[queue]; 2332 2333 if (txq->descs) 2334 qmap |= (1 << queue); 2335 } 2336 2337 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2338 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 2339 } 2340 2341 /* Disable transmit via physical egress queue 2342 * - HW doesn't take descriptors from DRAM 2343 */ 2344 static void mvpp2_egress_disable(struct mvpp2_port *port) 2345 { 2346 u32 reg_data; 2347 int delay; 2348 int tx_port_num = mvpp2_egress_port(port); 2349 2350 /* Issue stop command for active channels only */ 2351 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2352 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 2353 MVPP2_TXP_SCHED_ENQ_MASK; 2354 if (reg_data != 0) 2355 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 2356 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 2357 2358 /* Wait for all Tx activity to terminate. */ 2359 delay = 0; 2360 do { 2361 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 2362 netdev_warn(port->dev, 2363 "Tx stop timed out, status=0x%08x\n", 2364 reg_data); 2365 break; 2366 } 2367 mdelay(1); 2368 delay++; 2369 2370 /* Check the port TX Command register to see that all 2371 * Tx queues have stopped 2372 */ 2373 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 2374 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 2375 } 2376 2377 /* Rx descriptors helper methods */ 2378 2379 /* Get number of Rx descriptors occupied by received packets */ 2380 static inline int 2381 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 2382 { 2383 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 2384 2385 return val & MVPP2_RXQ_OCCUPIED_MASK; 2386 } 2387 2388 /* Update Rx queue status with the number of occupied and available 2389 * Rx descriptor slots. 2390 */ 2391 static inline void 2392 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 2393 int used_count, int free_count) 2394 { 2395 /* Decrement the number of used descriptors and increment 2396 * the number of free descriptors.
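 * Both counts go to the hardware in a single register write: used_count
 * in the low bits and free_count shifted by MVPP2_RXQ_NUM_NEW_OFFSET.
 * A caller that refills every processed buffer passes the same value
 * twice, as mvpp2_rxq_drop_pkts() does with rx_received.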
2397 */ 2398 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 2399 2400 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 2401 } 2402 2403 /* Get pointer to next RX descriptor to be processed by SW */ 2404 static inline struct mvpp2_rx_desc * 2405 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 2406 { 2407 int rx_desc = rxq->next_desc_to_proc; 2408 2409 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 2410 prefetch(rxq->descs + rxq->next_desc_to_proc); 2411 return rxq->descs + rx_desc; 2412 } 2413 2414 /* Set rx queue offset */ 2415 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 2416 int prxq, int offset) 2417 { 2418 u32 val; 2419 2420 /* Convert offset from bytes to units of 32 bytes */ 2421 offset = offset >> 5; 2422 2423 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2424 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 2425 2426 /* Offset is in units of 32 bytes */ 2427 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 2428 MVPP2_RXQ_PACKET_OFFSET_MASK); 2429 2430 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2431 } 2432 2433 /* Tx descriptors helper methods */ 2434 2435 /* Get pointer to next Tx descriptor to be processed (send) by HW */ 2436 static struct mvpp2_tx_desc * 2437 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 2438 { 2439 int tx_desc = txq->next_desc_to_proc; 2440 2441 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 2442 return txq->descs + tx_desc; 2443 } 2444 2445 /* Update HW with number of aggregated Tx descriptors to be sent 2446 * 2447 * Called only from mvpp2_tx(), so migration is disabled, using 2448 * smp_processor_id() is OK. 2449 */ 2450 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 2451 { 2452 /* aggregated access - relevant TXQ number is written in TX desc */ 2453 mvpp2_thread_write(port->priv, 2454 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2455 MVPP2_AGGR_TXQ_UPDATE_REG, pending); 2456 } 2457 2458 /* Check if there are enough free descriptors in aggregated txq. 2459 * If not, update the number of occupied descriptors and repeat the check. 2460 * 2461 * Called only from mvpp2_tx(), so migration is disabled, using 2462 * smp_processor_id() is OK. 2463 */ 2464 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, 2465 struct mvpp2_tx_queue *aggr_txq, int num) 2466 { 2467 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { 2468 /* Update number of occupied aggregated Tx descriptors */ 2469 unsigned int thread = 2470 mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 2471 u32 val = mvpp2_read_relaxed(port->priv, 2472 MVPP2_AGGR_TXQ_STATUS_REG(thread)); 2473 2474 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; 2475 2476 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) 2477 return -ENOMEM; 2478 } 2479 return 0; 2480 } 2481 2482 /* Reserved Tx descriptors allocation request 2483 * 2484 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called 2485 * only by mvpp2_tx(), so migration is disabled, using 2486 * smp_processor_id() is OK.
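 * The request is a single write of (txq id | num) to
 * MVPP2_TXQ_RSVD_REQ_REG; the hardware replies through
 * MVPP2_TXQ_RSVD_RSLT_REG with the number of descriptors actually
 * granted, which may be less than requested.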
2487 */ 2488 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, 2489 struct mvpp2_tx_queue *txq, int num) 2490 { 2491 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 2492 struct mvpp2 *priv = port->priv; 2493 u32 val; 2494 2495 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 2496 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); 2497 2498 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); 2499 2500 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 2501 } 2502 2503 /* Check if there are enough reserved descriptors for transmission. 2504 * If not, request chunk of reserved descriptors and check again. 2505 */ 2506 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, 2507 struct mvpp2_tx_queue *txq, 2508 struct mvpp2_txq_pcpu *txq_pcpu, 2509 int num) 2510 { 2511 int req, desc_count; 2512 unsigned int thread; 2513 2514 if (txq_pcpu->reserved_num >= num) 2515 return 0; 2516 2517 /* Not enough descriptors reserved! Update the reserved descriptor 2518 * count and check again. 2519 */ 2520 2521 desc_count = 0; 2522 /* Compute total of used descriptors */ 2523 for (thread = 0; thread < port->priv->nthreads; thread++) { 2524 struct mvpp2_txq_pcpu *txq_pcpu_aux; 2525 2526 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); 2527 desc_count += txq_pcpu_aux->count; 2528 desc_count += txq_pcpu_aux->reserved_num; 2529 } 2530 2531 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); 2532 desc_count += req; 2533 2534 if (desc_count > 2535 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) 2536 return -ENOMEM; 2537 2538 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); 2539 2540 /* OK, the descriptor could have been updated: check again. */ 2541 if (txq_pcpu->reserved_num < num) 2542 return -ENOMEM; 2543 return 0; 2544 } 2545 2546 /* Release the last allocated Tx descriptor. Useful to handle DMA 2547 * mapping failures in the Tx path. 2548 */ 2549 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) 2550 { 2551 if (txq->next_desc_to_proc == 0) 2552 txq->next_desc_to_proc = txq->last_desc - 1; 2553 else 2554 txq->next_desc_to_proc--; 2555 } 2556 2557 /* Set Tx descriptors fields relevant for CSUM calculation */ 2558 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, 2559 int ip_hdr_len, int l4_proto) 2560 { 2561 u32 command; 2562 2563 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 2564 * G_L4_chk, L4_type required only for checksum calculation 2565 */ 2566 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); 2567 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); 2568 command |= MVPP2_TXD_IP_CSUM_DISABLE; 2569 2570 if (l3_proto == htons(ETH_P_IP)) { 2571 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 2572 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 2573 } else { 2574 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 2575 } 2576 2577 if (l4_proto == IPPROTO_TCP) { 2578 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 2579 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2580 } else if (l4_proto == IPPROTO_UDP) { 2581 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 2582 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2583 } else { 2584 command |= MVPP2_TXD_L4_CSUM_NOT; 2585 } 2586 2587 return command; 2588 } 2589 2590 /* Get number of sent descriptors and decrement counter. 2591 * The number of sent descriptors is returned. 
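 * The sent counter is clear-on-read, so each call returns only the
 * descriptors completed since the previous read.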
2592 * Per-thread access 2593 * 2594 * Called only from mvpp2_txq_done(), called from mvpp2_tx() 2595 * (migration disabled) and from the TX completion tasklet (migration 2596 * disabled) so using smp_processor_id() is OK. 2597 */ 2598 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 2599 struct mvpp2_tx_queue *txq) 2600 { 2601 u32 val; 2602 2603 /* Reading status reg resets transmitted descriptor counter */ 2604 val = mvpp2_thread_read_relaxed(port->priv, 2605 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2606 MVPP2_TXQ_SENT_REG(txq->id)); 2607 2608 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 2609 MVPP2_TRANSMITTED_COUNT_OFFSET; 2610 } 2611 2612 /* Called through on_each_cpu(), so runs on all CPUs, with migration 2613 * disabled, therefore using smp_processor_id() is OK. 2614 */ 2615 static void mvpp2_txq_sent_counter_clear(void *arg) 2616 { 2617 struct mvpp2_port *port = arg; 2618 int queue; 2619 2620 /* If the thread isn't used, don't do anything */ 2621 if (smp_processor_id() >= port->priv->nthreads) 2622 return; 2623 2624 for (queue = 0; queue < port->ntxqs; queue++) { 2625 int id = port->txqs[queue]->id; 2626 2627 mvpp2_thread_read(port->priv, 2628 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2629 MVPP2_TXQ_SENT_REG(id)); 2630 } 2631 } 2632 2633 /* Set max sizes for Tx queues */ 2634 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 2635 { 2636 u32 val, size, mtu; 2637 int txq, tx_port_num; 2638 2639 mtu = port->pkt_size * 8; 2640 if (mtu > MVPP2_TXP_MTU_MAX) 2641 mtu = MVPP2_TXP_MTU_MAX; 2642 2643 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 2644 mtu = 3 * mtu; 2645 2646 /* Indirect access to registers */ 2647 tx_port_num = mvpp2_egress_port(port); 2648 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2649 2650 /* Set MTU */ 2651 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 2652 val &= ~MVPP2_TXP_MTU_MAX; 2653 val |= mtu; 2654 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 2655 2656 /* TXP token size and all TXQs token size must be larger that MTU */ 2657 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 2658 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 2659 if (size < mtu) { 2660 size = mtu; 2661 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 2662 val |= size; 2663 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2664 } 2665 2666 for (txq = 0; txq < port->ntxqs; txq++) { 2667 val = mvpp2_read(port->priv, 2668 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 2669 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 2670 2671 if (size < mtu) { 2672 size = mtu; 2673 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 2674 val |= size; 2675 mvpp2_write(port->priv, 2676 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 2677 val); 2678 } 2679 } 2680 } 2681 2682 /* Set the number of non-occupied descriptors threshold */ 2683 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, 2684 struct mvpp2_rx_queue *rxq) 2685 { 2686 u32 val; 2687 2688 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 2689 2690 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG); 2691 val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK; 2692 val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET; 2693 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); 2694 } 2695 2696 /* Set the number of packets that will be received before Rx interrupt 2697 * will be generated by HW. 
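 * The value is clamped to the width of the hardware field
 * (MVPP2_OCCUPIED_THRESH_MASK), so the effective coalescing threshold
 * may be lower than the one requested through ethtool.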
2698 */ 2699 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 2700 struct mvpp2_rx_queue *rxq) 2701 { 2702 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2703 2704 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 2705 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 2706 2707 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2708 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, 2709 rxq->pkts_coal); 2710 2711 put_cpu(); 2712 } 2713 2714 /* For some reason in the LSP this is done on each CPU. Why ? */ 2715 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, 2716 struct mvpp2_tx_queue *txq) 2717 { 2718 unsigned int thread; 2719 u32 val; 2720 2721 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) 2722 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; 2723 2724 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); 2725 /* PKT-coalescing registers are per-queue + per-thread */ 2726 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { 2727 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2728 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); 2729 } 2730 } 2731 2732 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 2733 { 2734 u64 tmp = (u64)clk_hz * usec; 2735 2736 do_div(tmp, USEC_PER_SEC); 2737 2738 return tmp > U32_MAX ? U32_MAX : tmp; 2739 } 2740 2741 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) 2742 { 2743 u64 tmp = (u64)cycles * USEC_PER_SEC; 2744 2745 do_div(tmp, clk_hz); 2746 2747 return tmp > U32_MAX ? U32_MAX : tmp; 2748 } 2749 2750 /* Set the time delay in usec before Rx interrupt */ 2751 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 2752 struct mvpp2_rx_queue *rxq) 2753 { 2754 unsigned long freq = port->priv->tclk; 2755 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2756 2757 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 2758 rxq->time_coal = 2759 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 2760 2761 /* re-evaluate to get actual register value */ 2762 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2763 } 2764 2765 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 2766 } 2767 2768 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 2769 { 2770 unsigned long freq = port->priv->tclk; 2771 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2772 2773 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 2774 port->tx_time_coal = 2775 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 2776 2777 /* re-evaluate to get actual register value */ 2778 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2779 } 2780 2781 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 2782 } 2783 2784 /* Free Tx queue skbuffs */ 2785 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 2786 struct mvpp2_tx_queue *txq, 2787 struct mvpp2_txq_pcpu *txq_pcpu, int num) 2788 { 2789 struct xdp_frame_bulk bq; 2790 int i; 2791 2792 xdp_frame_bulk_init(&bq); 2793 2794 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 2795 2796 for (i = 0; i < num; i++) { 2797 struct mvpp2_txq_pcpu_buf *tx_buf = 2798 txq_pcpu->buffs + txq_pcpu->txq_get_index; 2799 2800 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && 2801 tx_buf->type != MVPP2_TYPE_XDP_TX) 2802 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 2803 tx_buf->size, DMA_TO_DEVICE); 2804 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) 2805 dev_kfree_skb_any(tx_buf->skb); 2806 else if (tx_buf->type == MVPP2_TYPE_XDP_TX || 2807 tx_buf->type == MVPP2_TYPE_XDP_NDO) 
2808 xdp_return_frame_bulk(tx_buf->xdpf, &bq); 2809 2810 mvpp2_txq_inc_get(txq_pcpu); 2811 } 2812 xdp_flush_frame_bulk(&bq); 2813 2814 rcu_read_unlock(); 2815 } 2816 2817 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 2818 u32 cause) 2819 { 2820 int queue = fls(cause) - 1; 2821 2822 return port->rxqs[queue]; 2823 } 2824 2825 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 2826 u32 cause) 2827 { 2828 int queue = fls(cause) - 1; 2829 2830 return port->txqs[queue]; 2831 } 2832 2833 /* Handle end of transmission */ 2834 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 2835 struct mvpp2_txq_pcpu *txq_pcpu) 2836 { 2837 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 2838 int tx_done; 2839 2840 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) 2841 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 2842 2843 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 2844 if (!tx_done) 2845 return; 2846 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 2847 2848 txq_pcpu->count -= tx_done; 2849 2850 if (netif_tx_queue_stopped(nq)) 2851 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 2852 netif_tx_wake_queue(nq); 2853 } 2854 2855 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 2856 unsigned int thread) 2857 { 2858 struct mvpp2_tx_queue *txq; 2859 struct mvpp2_txq_pcpu *txq_pcpu; 2860 unsigned int tx_todo = 0; 2861 2862 while (cause) { 2863 txq = mvpp2_get_tx_queue(port, cause); 2864 if (!txq) 2865 break; 2866 2867 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2868 2869 if (txq_pcpu->count) { 2870 mvpp2_txq_done(port, txq, txq_pcpu); 2871 tx_todo += txq_pcpu->count; 2872 } 2873 2874 cause &= ~(1 << txq->log_id); 2875 } 2876 return tx_todo; 2877 } 2878 2879 /* Rx/Tx queue initialization/cleanup methods */ 2880 2881 /* Allocate and initialize descriptors for aggr TXQ */ 2882 static int mvpp2_aggr_txq_init(struct platform_device *pdev, 2883 struct mvpp2_tx_queue *aggr_txq, 2884 unsigned int thread, struct mvpp2 *priv) 2885 { 2886 u32 txq_dma; 2887 2888 /* Allocate memory for TX descriptors */ 2889 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2890 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2891 &aggr_txq->descs_dma, GFP_KERNEL); 2892 if (!aggr_txq->descs) 2893 return -ENOMEM; 2894 2895 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 2896 2897 /* Aggr TXQ no reset WA */ 2898 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 2899 MVPP2_AGGR_TXQ_INDEX_REG(thread)); 2900 2901 /* Set Tx descriptors queue starting address indirect 2902 * access 2903 */ 2904 if (priv->hw_version == MVPP21) 2905 txq_dma = aggr_txq->descs_dma; 2906 else 2907 txq_dma = aggr_txq->descs_dma >> 2908 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 2909 2910 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); 2911 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), 2912 MVPP2_AGGR_TXQ_SIZE); 2913 2914 return 0; 2915 } 2916 2917 /* Create a specified Rx queue */ 2918 static int mvpp2_rxq_init(struct mvpp2_port *port, 2919 struct mvpp2_rx_queue *rxq) 2920 { 2921 struct mvpp2 *priv = port->priv; 2922 unsigned int thread; 2923 u32 rxq_dma; 2924 int err; 2925 2926 rxq->size = port->rx_ring_size; 2927 2928 /* Allocate memory for RX descriptors */ 2929 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 2930 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2931 &rxq->descs_dma, GFP_KERNEL); 2932 if (!rxq->descs) 2933 return -ENOMEM; 2934 2935 rxq->last_desc = rxq->size - 1; 
2936 2937 /* Zero occupied and non-occupied counters - direct access */ 2938 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2939 2940 /* Set Rx descriptors queue starting address - indirect access */ 2941 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2942 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2943 if (port->priv->hw_version == MVPP21) 2944 rxq_dma = rxq->descs_dma; 2945 else 2946 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 2947 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 2948 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 2949 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); 2950 put_cpu(); 2951 2952 /* Set Offset */ 2953 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); 2954 2955 /* Set coalescing pkts and time */ 2956 mvpp2_rx_pkts_coal_set(port, rxq); 2957 mvpp2_rx_time_coal_set(port, rxq); 2958 2959 /* Set the number of non occupied descriptors threshold */ 2960 mvpp2_set_rxq_free_tresh(port, rxq); 2961 2962 /* Add number of descriptors ready for receiving packets */ 2963 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 2964 2965 if (priv->percpu_pools) { 2966 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); 2967 if (err < 0) 2968 goto err_free_dma; 2969 2970 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); 2971 if (err < 0) 2972 goto err_unregister_rxq_short; 2973 2974 /* Every RXQ has a pool for short and another for long packets */ 2975 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, 2976 MEM_TYPE_PAGE_POOL, 2977 priv->page_pool[rxq->logic_rxq]); 2978 if (err < 0) 2979 goto err_unregister_rxq_long; 2980 2981 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, 2982 MEM_TYPE_PAGE_POOL, 2983 priv->page_pool[rxq->logic_rxq + 2984 port->nrxqs]); 2985 if (err < 0) 2986 goto err_unregister_mem_rxq_short; 2987 } 2988 2989 return 0; 2990 2991 err_unregister_mem_rxq_short: 2992 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); 2993 err_unregister_rxq_long: 2994 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 2995 err_unregister_rxq_short: 2996 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 2997 err_free_dma: 2998 dma_free_coherent(port->dev->dev.parent, 2999 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 3000 rxq->descs, rxq->descs_dma); 3001 return err; 3002 } 3003 3004 /* Push packets received by the RXQ to BM pool */ 3005 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 3006 struct mvpp2_rx_queue *rxq) 3007 { 3008 int rx_received, i; 3009 3010 rx_received = mvpp2_rxq_received(port, rxq->id); 3011 if (!rx_received) 3012 return; 3013 3014 for (i = 0; i < rx_received; i++) { 3015 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 3016 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3017 int pool; 3018 3019 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 3020 MVPP2_RXD_BM_POOL_ID_OFFS; 3021 3022 mvpp2_bm_pool_put(port, pool, 3023 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 3024 mvpp2_rxdesc_cookie_get(port, rx_desc)); 3025 } 3026 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 3027 } 3028 3029 /* Cleanup Rx queue */ 3030 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 3031 struct mvpp2_rx_queue *rxq) 3032 { 3033 unsigned int thread; 3034 3035 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) 3036 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 3037 3038 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) 3039 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 3040 3041 mvpp2_rxq_drop_pkts(port, rxq); 
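	/* All in-flight buffers were returned to the BM pool above, so the
	 * descriptor ring can now be freed without leaking buffers.
	 */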
3042 3043 if (rxq->descs) 3044 dma_free_coherent(port->dev->dev.parent, 3045 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 3046 rxq->descs, 3047 rxq->descs_dma); 3048 3049 rxq->descs = NULL; 3050 rxq->last_desc = 0; 3051 rxq->next_desc_to_proc = 0; 3052 rxq->descs_dma = 0; 3053 3054 /* Clear Rx descriptors queue starting address and size; 3055 * free descriptor number 3056 */ 3057 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 3058 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3059 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 3060 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); 3061 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); 3062 put_cpu(); 3063 } 3064 3065 /* Create and initialize a Tx queue */ 3066 static int mvpp2_txq_init(struct mvpp2_port *port, 3067 struct mvpp2_tx_queue *txq) 3068 { 3069 u32 val; 3070 unsigned int thread; 3071 int desc, desc_per_txq, tx_port_num; 3072 struct mvpp2_txq_pcpu *txq_pcpu; 3073 3074 txq->size = port->tx_ring_size; 3075 3076 /* Allocate memory for Tx descriptors */ 3077 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 3078 txq->size * MVPP2_DESC_ALIGNED_SIZE, 3079 &txq->descs_dma, GFP_KERNEL); 3080 if (!txq->descs) 3081 return -ENOMEM; 3082 3083 txq->last_desc = txq->size - 1; 3084 3085 /* Set Tx descriptors queue starting address - indirect access */ 3086 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3087 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3088 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 3089 txq->descs_dma); 3090 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 3091 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 3092 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); 3093 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, 3094 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 3095 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); 3096 val &= ~MVPP2_TXQ_PENDING_MASK; 3097 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); 3098 3099 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 3100 * for each existing TXQ. 
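 * For example, with desc_per_txq = 16, txq log_id 2 of port 1 gets the
 * prefetch base 1 * MVPP2_MAX_TXQ * 16 + 2 * 16.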
3101 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 3102 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS 3103 */ 3104 desc_per_txq = 16; 3105 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 3106 (txq->log_id * desc_per_txq); 3107 3108 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, 3109 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 3110 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 3111 put_cpu(); 3112 3113 /* WRR / EJP configuration - indirect access */ 3114 tx_port_num = mvpp2_egress_port(port); 3115 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3116 3117 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 3118 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 3119 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 3120 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 3121 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 3122 3123 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 3124 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 3125 val); 3126 3127 for (thread = 0; thread < port->priv->nthreads; thread++) { 3128 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3129 txq_pcpu->size = txq->size; 3130 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, 3131 sizeof(*txq_pcpu->buffs), 3132 GFP_KERNEL); 3133 if (!txq_pcpu->buffs) 3134 return -ENOMEM; 3135 3136 txq_pcpu->count = 0; 3137 txq_pcpu->reserved_num = 0; 3138 txq_pcpu->txq_put_index = 0; 3139 txq_pcpu->txq_get_index = 0; 3140 txq_pcpu->tso_headers = NULL; 3141 3142 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; 3143 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; 3144 3145 txq_pcpu->tso_headers = 3146 dma_alloc_coherent(port->dev->dev.parent, 3147 txq_pcpu->size * TSO_HEADER_SIZE, 3148 &txq_pcpu->tso_headers_dma, 3149 GFP_KERNEL); 3150 if (!txq_pcpu->tso_headers) 3151 return -ENOMEM; 3152 } 3153 3154 return 0; 3155 } 3156 3157 /* Free allocated TXQ resources */ 3158 static void mvpp2_txq_deinit(struct mvpp2_port *port, 3159 struct mvpp2_tx_queue *txq) 3160 { 3161 struct mvpp2_txq_pcpu *txq_pcpu; 3162 unsigned int thread; 3163 3164 for (thread = 0; thread < port->priv->nthreads; thread++) { 3165 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3166 kfree(txq_pcpu->buffs); 3167 3168 if (txq_pcpu->tso_headers) 3169 dma_free_coherent(port->dev->dev.parent, 3170 txq_pcpu->size * TSO_HEADER_SIZE, 3171 txq_pcpu->tso_headers, 3172 txq_pcpu->tso_headers_dma); 3173 3174 txq_pcpu->tso_headers = NULL; 3175 } 3176 3177 if (txq->descs) 3178 dma_free_coherent(port->dev->dev.parent, 3179 txq->size * MVPP2_DESC_ALIGNED_SIZE, 3180 txq->descs, txq->descs_dma); 3181 3182 txq->descs = NULL; 3183 txq->last_desc = 0; 3184 txq->next_desc_to_proc = 0; 3185 txq->descs_dma = 0; 3186 3187 /* Set minimum bandwidth for disabled TXQs */ 3188 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); 3189 3190 /* Set Tx descriptors queue starting address and size */ 3191 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3192 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3193 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); 3194 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); 3195 put_cpu(); 3196 } 3197 3198 /* Cleanup Tx ports */ 3199 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 3200 { 3201 struct mvpp2_txq_pcpu *txq_pcpu; 3202 int delay, pending; 3203 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3204 u32 val; 3205 3206 
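	/* Select the queue through the indirect window and enable drain
	 * mode, so the descriptors still pending can empty out before the
	 * per-thread buffers are released below.
	 */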
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3207 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); 3208 val |= MVPP2_TXQ_DRAIN_EN_MASK; 3209 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3210 3211 /* The napi queue has been stopped so wait for all packets 3212 * to be transmitted. 3213 */ 3214 delay = 0; 3215 do { 3216 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 3217 netdev_warn(port->dev, 3218 "port %d: cleaning queue %d timed out\n", 3219 port->id, txq->log_id); 3220 break; 3221 } 3222 mdelay(1); 3223 delay++; 3224 3225 pending = mvpp2_thread_read(port->priv, thread, 3226 MVPP2_TXQ_PENDING_REG); 3227 pending &= MVPP2_TXQ_PENDING_MASK; 3228 } while (pending); 3229 3230 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 3231 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3232 put_cpu(); 3233 3234 for (thread = 0; thread < port->priv->nthreads; thread++) { 3235 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3236 3237 /* Release all packets */ 3238 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 3239 3240 /* Reset queue */ 3241 txq_pcpu->count = 0; 3242 txq_pcpu->txq_put_index = 0; 3243 txq_pcpu->txq_get_index = 0; 3244 } 3245 } 3246 3247 /* Cleanup all Tx queues */ 3248 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 3249 { 3250 struct mvpp2_tx_queue *txq; 3251 int queue; 3252 u32 val; 3253 3254 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 3255 3256 /* Reset Tx ports and delete Tx queues */ 3257 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 3258 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3259 3260 for (queue = 0; queue < port->ntxqs; queue++) { 3261 txq = port->txqs[queue]; 3262 mvpp2_txq_clean(port, txq); 3263 mvpp2_txq_deinit(port, txq); 3264 } 3265 3266 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3267 3268 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 3269 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3270 } 3271 3272 /* Cleanup all Rx queues */ 3273 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 3274 { 3275 int queue; 3276 3277 for (queue = 0; queue < port->nrxqs; queue++) 3278 mvpp2_rxq_deinit(port, port->rxqs[queue]); 3279 3280 if (port->tx_fc) 3281 mvpp2_rxq_disable_fc(port); 3282 } 3283 3284 /* Init all Rx queues for port */ 3285 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 3286 { 3287 int queue, err; 3288 3289 for (queue = 0; queue < port->nrxqs; queue++) { 3290 err = mvpp2_rxq_init(port, port->rxqs[queue]); 3291 if (err) 3292 goto err_cleanup; 3293 } 3294 3295 if (port->tx_fc) 3296 mvpp2_rxq_enable_fc(port); 3297 3298 return 0; 3299 3300 err_cleanup: 3301 mvpp2_cleanup_rxqs(port); 3302 return err; 3303 } 3304 3305 /* Init all tx queues for port */ 3306 static int mvpp2_setup_txqs(struct mvpp2_port *port) 3307 { 3308 struct mvpp2_tx_queue *txq; 3309 int queue, err; 3310 3311 for (queue = 0; queue < port->ntxqs; queue++) { 3312 txq = port->txqs[queue]; 3313 err = mvpp2_txq_init(port, txq); 3314 if (err) 3315 goto err_cleanup; 3316 3317 /* Assign this queue to a CPU */ 3318 if (queue < num_possible_cpus()) 3319 netif_set_xps_queue(port->dev, cpumask_of(queue), queue); 3320 } 3321 3322 if (port->has_tx_irqs) { 3323 mvpp2_tx_time_coal_set(port); 3324 for (queue = 0; queue < port->ntxqs; queue++) { 3325 txq = port->txqs[queue]; 3326 mvpp2_tx_pkts_coal_set(port, txq); 3327 } 3328 } 3329 3330 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3331 return 0; 3332 3333 err_cleanup: 3334 mvpp2_cleanup_txqs(port); 3335 return err; 3336 } 3337 3338 /* The 
callback for per-port interrupt */ 3339 static irqreturn_t mvpp2_isr(int irq, void *dev_id) 3340 { 3341 struct mvpp2_queue_vector *qv = dev_id; 3342 3343 mvpp2_qvec_interrupt_disable(qv); 3344 3345 napi_schedule(&qv->napi); 3346 3347 return IRQ_HANDLED; 3348 } 3349 3350 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) 3351 { 3352 struct skb_shared_hwtstamps shhwtstamps; 3353 struct mvpp2_hwtstamp_queue *queue; 3354 struct sk_buff *skb; 3355 void __iomem *ptp_q; 3356 unsigned int id; 3357 u32 r0, r1, r2; 3358 3359 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 3360 if (nq) 3361 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; 3362 3363 queue = &port->tx_hwtstamp_queue[nq]; 3364 3365 while (1) { 3366 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; 3367 if (!r0) 3368 break; 3369 3370 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; 3371 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; 3372 3373 id = (r0 >> 1) & 31; 3374 3375 skb = queue->skb[id]; 3376 queue->skb[id] = NULL; 3377 if (skb) { 3378 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; 3379 3380 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); 3381 skb_tstamp_tx(skb, &shhwtstamps); 3382 dev_kfree_skb_any(skb); 3383 } 3384 } 3385 } 3386 3387 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) 3388 { 3389 void __iomem *ptp; 3390 u32 val; 3391 3392 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 3393 val = readl(ptp + MVPP22_PTP_INT_CAUSE); 3394 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) 3395 mvpp2_isr_handle_ptp_queue(port, 0); 3396 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) 3397 mvpp2_isr_handle_ptp_queue(port, 1); 3398 } 3399 3400 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) 3401 { 3402 struct net_device *dev = port->dev; 3403 3404 if (port->phylink) { 3405 phylink_mac_change(port->phylink, link); 3406 return; 3407 } 3408 3409 if (!netif_running(dev)) 3410 return; 3411 3412 if (link) { 3413 mvpp2_interrupts_enable(port); 3414 3415 mvpp2_egress_enable(port); 3416 mvpp2_ingress_enable(port); 3417 netif_carrier_on(dev); 3418 netif_tx_wake_all_queues(dev); 3419 } else { 3420 netif_tx_stop_all_queues(dev); 3421 netif_carrier_off(dev); 3422 mvpp2_ingress_disable(port); 3423 mvpp2_egress_disable(port); 3424 3425 mvpp2_interrupts_disable(port); 3426 } 3427 } 3428 3429 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) 3430 { 3431 bool link; 3432 u32 val; 3433 3434 val = readl(port->base + MVPP22_XLG_INT_STAT); 3435 if (val & MVPP22_XLG_INT_STAT_LINK) { 3436 val = readl(port->base + MVPP22_XLG_STATUS); 3437 link = (val & MVPP22_XLG_STATUS_LINK_UP); 3438 mvpp2_isr_handle_link(port, link); 3439 } 3440 } 3441 3442 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) 3443 { 3444 bool link; 3445 u32 val; 3446 3447 if (phy_interface_mode_is_rgmii(port->phy_interface) || 3448 phy_interface_mode_is_8023z(port->phy_interface) || 3449 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 3450 val = readl(port->base + MVPP22_GMAC_INT_STAT); 3451 if (val & MVPP22_GMAC_INT_STAT_LINK) { 3452 val = readl(port->base + MVPP2_GMAC_STATUS0); 3453 link = (val & MVPP2_GMAC_STATUS0_LINK_UP); 3454 mvpp2_isr_handle_link(port, link); 3455 } 3456 } 3457 } 3458 3459 /* Per-port interrupt for link status changes */ 3460 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) 3461 { 3462 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; 3463 u32 val; 3464 3465 mvpp22_gop_mask_irq(port); 3466 3467 if (mvpp2_port_supports_xlg(port) && 3468 
mvpp2_is_xlg(port->phy_interface)) { 3469 /* Check the external status register */ 3470 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); 3471 if (val & MVPP22_XLG_EXT_INT_STAT_XLG) 3472 mvpp2_isr_handle_xlg(port); 3473 if (val & MVPP22_XLG_EXT_INT_STAT_PTP) 3474 mvpp2_isr_handle_ptp(port); 3475 } else { 3476 /* If it's not the XLG, we must be using the GMAC. 3477 * Check the summary status. 3478 */ 3479 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); 3480 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) 3481 mvpp2_isr_handle_gmac_internal(port); 3482 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) 3483 mvpp2_isr_handle_ptp(port); 3484 } 3485 3486 mvpp22_gop_unmask_irq(port); 3487 return IRQ_HANDLED; 3488 } 3489 3490 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 3491 { 3492 struct net_device *dev; 3493 struct mvpp2_port *port; 3494 struct mvpp2_port_pcpu *port_pcpu; 3495 unsigned int tx_todo, cause; 3496 3497 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); 3498 dev = port_pcpu->dev; 3499 3500 if (!netif_running(dev)) 3501 return HRTIMER_NORESTART; 3502 3503 port_pcpu->timer_scheduled = false; 3504 port = netdev_priv(dev); 3505 3506 /* Process all the Tx queues */ 3507 cause = (1 << port->ntxqs) - 1; 3508 tx_todo = mvpp2_tx_done(port, cause, 3509 mvpp2_cpu_to_thread(port->priv, smp_processor_id())); 3510 3511 /* Set the timer in case not all the packets were processed */ 3512 if (tx_todo && !port_pcpu->timer_scheduled) { 3513 port_pcpu->timer_scheduled = true; 3514 hrtimer_forward_now(&port_pcpu->tx_done_timer, 3515 MVPP2_TXDONE_HRTIMER_PERIOD_NS); 3516 3517 return HRTIMER_RESTART; 3518 } 3519 return HRTIMER_NORESTART; 3520 } 3521 3522 /* Main RX/TX processing routines */ 3523 3524 /* Display more error info */ 3525 static void mvpp2_rx_error(struct mvpp2_port *port, 3526 struct mvpp2_rx_desc *rx_desc) 3527 { 3528 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3529 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 3530 char *err_str = NULL; 3531 3532 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 3533 case MVPP2_RXD_ERR_CRC: 3534 err_str = "crc"; 3535 break; 3536 case MVPP2_RXD_ERR_OVERRUN: 3537 err_str = "overrun"; 3538 break; 3539 case MVPP2_RXD_ERR_RESOURCE: 3540 err_str = "resource"; 3541 break; 3542 } 3543 if (err_str && net_ratelimit()) 3544 netdev_err(port->dev, 3545 "bad rx status %08x (%s error), size=%zu\n", 3546 status, err_str, sz); 3547 } 3548 3549 /* Handle RX checksum offload */ 3550 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) 3551 { 3552 if (((status & MVPP2_RXD_L3_IP4) && 3553 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 3554 (status & MVPP2_RXD_L3_IP6)) 3555 if (((status & MVPP2_RXD_L4_UDP) || 3556 (status & MVPP2_RXD_L4_TCP)) && 3557 (status & MVPP2_RXD_L4_CSUM_OK)) 3558 return CHECKSUM_UNNECESSARY; 3559 3560 return CHECKSUM_NONE; 3561 } 3562 3563 /* Allocate a new skb and add it to BM pool */ 3564 static int mvpp2_rx_refill(struct mvpp2_port *port, 3565 struct mvpp2_bm_pool *bm_pool, 3566 struct page_pool *page_pool, int pool) 3567 { 3568 dma_addr_t dma_addr; 3569 phys_addr_t phys_addr; 3570 void *buf; 3571 3572 buf = mvpp2_buf_alloc(port, bm_pool, page_pool, 3573 &dma_addr, &phys_addr, GFP_ATOMIC); 3574 if (!buf) 3575 return -ENOMEM; 3576 3577 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3578 3579 return 0; 3580 } 3581 3582 /* Handle tx checksum */ 3583 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) 3584 { 3585 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3586 int ip_hdr_len = 0; 
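		/* ip_hdr_len is kept in 32-bit words, the unit the TX
		 * descriptor expects: IPv4's ihl already uses it and the
		 * IPv6 header length is converted below with the >> 2.
		 */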
3587 u8 l4_proto; 3588 __be16 l3_proto = vlan_get_protocol(skb); 3589 3590 if (l3_proto == htons(ETH_P_IP)) { 3591 struct iphdr *ip4h = ip_hdr(skb); 3592 3593 /* Calculate IPv4 checksum and L4 checksum */ 3594 ip_hdr_len = ip4h->ihl; 3595 l4_proto = ip4h->protocol; 3596 } else if (l3_proto == htons(ETH_P_IPV6)) { 3597 struct ipv6hdr *ip6h = ipv6_hdr(skb); 3598 3599 /* Read l4_protocol from one of IPv6 extra headers */ 3600 if (skb_network_header_len(skb) > 0) 3601 ip_hdr_len = (skb_network_header_len(skb) >> 2); 3602 l4_proto = ip6h->nexthdr; 3603 } else { 3604 return MVPP2_TXD_L4_CSUM_NOT; 3605 } 3606 3607 return mvpp2_txq_desc_csum(skb_network_offset(skb), 3608 l3_proto, ip_hdr_len, l4_proto); 3609 } 3610 3611 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 3612 } 3613 3614 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) 3615 { 3616 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3617 struct mvpp2_tx_queue *aggr_txq; 3618 struct mvpp2_txq_pcpu *txq_pcpu; 3619 struct mvpp2_tx_queue *txq; 3620 struct netdev_queue *nq; 3621 3622 txq = port->txqs[txq_id]; 3623 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3624 nq = netdev_get_tx_queue(port->dev, txq_id); 3625 aggr_txq = &port->priv->aggr_txqs[thread]; 3626 3627 txq_pcpu->reserved_num -= nxmit; 3628 txq_pcpu->count += nxmit; 3629 aggr_txq->count += nxmit; 3630 3631 /* Enable transmit */ 3632 wmb(); 3633 mvpp2_aggr_txq_pend_desc_add(port, nxmit); 3634 3635 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 3636 netif_tx_stop_queue(nq); 3637 3638 /* Finalize TX processing */ 3639 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 3640 mvpp2_txq_done(port, txq, txq_pcpu); 3641 } 3642 3643 static int 3644 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, 3645 struct xdp_frame *xdpf, bool dma_map) 3646 { 3647 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3648 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | 3649 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 3650 enum mvpp2_tx_buf_type buf_type; 3651 struct mvpp2_txq_pcpu *txq_pcpu; 3652 struct mvpp2_tx_queue *aggr_txq; 3653 struct mvpp2_tx_desc *tx_desc; 3654 struct mvpp2_tx_queue *txq; 3655 int ret = MVPP2_XDP_TX; 3656 dma_addr_t dma_addr; 3657 3658 txq = port->txqs[txq_id]; 3659 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3660 aggr_txq = &port->priv->aggr_txqs[thread]; 3661 3662 /* Check number of available descriptors */ 3663 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || 3664 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { 3665 ret = MVPP2_XDP_DROPPED; 3666 goto out; 3667 } 3668 3669 /* Get a descriptor for the first part of the packet */ 3670 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3671 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3672 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); 3673 3674 if (dma_map) { 3675 /* XDP_REDIRECT or AF_XDP */ 3676 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, 3677 xdpf->len, DMA_TO_DEVICE); 3678 3679 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 3680 mvpp2_txq_desc_put(txq); 3681 ret = MVPP2_XDP_DROPPED; 3682 goto out; 3683 } 3684 3685 buf_type = MVPP2_TYPE_XDP_NDO; 3686 } else { 3687 /* XDP_TX */ 3688 struct page *page = virt_to_page(xdpf->data); 3689 3690 dma_addr = page_pool_get_dma_addr(page) + 3691 sizeof(*xdpf) + xdpf->headroom; 3692 dma_sync_single_for_device(port->dev->dev.parent, dma_addr, 3693 xdpf->len, DMA_BIDIRECTIONAL); 3694 3695 buf_type = 
MVPP2_TYPE_XDP_TX;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);

out:
	return ret;
}

static int
mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
{
	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
	struct xdp_frame *xdpf;
	u16 txq_id;
	int ret;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return MVPP2_XDP_DROPPED;

	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX.
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
	if (ret == MVPP2_XDP_TX) {
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += xdpf->len;
		stats->tx_packets++;
		stats->xdp_tx++;
		u64_stats_update_end(&stats->syncp);

		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
	}

	return ret;
}

static int
mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
	       struct xdp_frame **frames, u32 flags)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int i, nxmit_byte = 0, nxmit = 0;
	struct mvpp2_pcpu_stats *stats;
	u16 txq_id;
	u32 ret;

	if (unlikely(test_bit(0, &port->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX.
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	for (i = 0; i < num_frame; i++) {
		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
		if (ret != MVPP2_XDP_TX)
			break;

		nxmit_byte += frames[i]->len;
		nxmit++;
	}

	if (likely(nxmit > 0))
		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);

	stats = this_cpu_ptr(port->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += nxmit_byte;
	stats->tx_packets += nxmit;
	stats->xdp_xmit += nxmit;
	stats->xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);

	return nxmit;
}

static int
mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
	      struct xdp_buff *xdp, struct page_pool *pp,
	      struct mvpp2_pcpu_stats *stats)
{
	unsigned int len, sync, err;
	struct page *page;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail, the DMA sync for_device must cover the
	 * maximum length the CPU might have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		ret = MVPP2_XDP_PASS;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(port->dev, xdp, prog);
		if (unlikely(err)) {
			ret = MVPP2_XDP_DROPPED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		} else {
			ret = MVPP2_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	case XDP_TX:
		ret = mvpp2_xdp_xmit_back(port, xdp);
		if (ret != MVPP2_XDP_TX) {
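			/* XDP_TX could not queue the frame: recycle the
			 * page straight back to its pool.
			 */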
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(port->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(pp, page, sync, true);
		ret = MVPP2_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	return ret;
}

static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
				    int pool, u32 rx_status)
{
	phys_addr_t phys_addr, phys_addr_next;
	dma_addr_t dma_addr, dma_addr_next;
	struct mvpp2_buff_hdr *buff_hdr;

	/* As in mvpp2_rx(), the descriptor cookie carries the buffer's
	 * physical address and buf_dma_addr its DMA address.
	 */
	phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	do {
		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);

		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);

		if (port->priv->hw_version >= MVPP22) {
			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
		}

		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

		phys_addr = phys_addr_next;
		dma_addr = dma_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	struct mvpp2_pcpu_stats ps = {};
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int rx_received;
	int rx_done = 0;
	u32 xdp_ret = 0;

	xdp_prog = READ_ONCE(port->xdp_prog);

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct page_pool *pp = NULL;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status, timestamp;
		int pool, rx_bytes, err, ret;
		struct page *page;
		void *data;

		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);
		page = virt_to_page(data);
		prefetch(page);

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		if (port->priv->percpu_pools) {
			pp = port->priv->page_pool[pool];
			dma_dir = page_pool_get_dma_dir(pp);
		} else {
			dma_dir = DMA_FROM_DEVICE;
		}

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					dma_dir);

		/* Buffer header not supported */
		if (rx_status & MVPP2_RXD_BUF_HDR)
			goto err_drop_frame;

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This release request is handled by
		 * the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		/* Prefetch header */
		prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		if (xdp_prog) {
			struct xdp_rxq_info *xdp_rxq;

			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
				xdp_rxq = &rxq->xdp_rxq_short;
			else
				xdp_rxq = &rxq->xdp_rxq_long;

			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
			xdp_prepare_buff(&xdp, data,
					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
					 rx_bytes, false);

			ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);

			if (ret) {
				xdp_ret |= ret;
				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
				if (err) {
					netdev_err(port->dev, "failed to refill BM pools\n");
					goto err_drop_frame;
				}

				ps.rx_packets++;
				ps.rx_bytes += rx_bytes;
				continue;
			}
		}

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		/* If we have RX hardware timestamping enabled, grab the
		 * timestamp from the queue and convert.
		 */
		if (mvpp22_rx_hwtstamping(port)) {
			timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
			mvpp22_tai_tstamp(port->priv->tai, timestamp,
					  skb_hwtstamps(skb));
		}

		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			dev_kfree_skb_any(skb);
			goto err_drop_frame;
		}

		if (pp)
			skb_mark_for_recycle(skb);
		else
			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
					       bm_pool->buf_size, DMA_FROM_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);

		ps.rx_packets++;
		ps.rx_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
		skb_put(skb, rx_bytes);
		skb->ip_summed = mvpp2_rx_csum(port, rx_status);
		skb->protocol = eth_type_trans(skb, dev);

		napi_gro_receive(napi, skb);
		continue;

err_drop_frame:
		dev->stats.rx_errors++;
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		if (rx_status & MVPP2_RXD_BUF_HDR)
			mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
		else
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
	}

	if (xdp_ret & MVPP2_XDP_REDIR)
		xdp_do_flush_map();

	if (ps.rx_packets) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += ps.rx_packets;
		stats->rx_bytes += ps.rx_bytes;
		/* xdp */
		stats->xdp_redirect += ps.xdp_redirect;
		stats->xdp_pass += ps.xdp_pass;
		stats->xdp_drop += ps.xdp_drop;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc); 4061 size_t buf_sz = 4062 mvpp2_txdesc_size_get(port, desc); 4063 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) 4064 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 4065 buf_sz, DMA_TO_DEVICE); 4066 mvpp2_txq_desc_put(txq); 4067 } 4068 4069 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, 4070 struct mvpp2_tx_desc *desc) 4071 { 4072 /* We only need to clear the low bits */ 4073 if (port->priv->hw_version >= MVPP22) 4074 desc->pp22.ptp_descriptor &= 4075 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); 4076 } 4077 4078 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, 4079 struct mvpp2_tx_desc *tx_desc, 4080 struct sk_buff *skb) 4081 { 4082 struct mvpp2_hwtstamp_queue *queue; 4083 unsigned int mtype, type, i; 4084 struct ptp_header *hdr; 4085 u64 ptpdesc; 4086 4087 if (port->priv->hw_version == MVPP21 || 4088 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) 4089 return false; 4090 4091 type = ptp_classify_raw(skb); 4092 if (!type) 4093 return false; 4094 4095 hdr = ptp_parse_header(skb, type); 4096 if (!hdr) 4097 return false; 4098 4099 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4100 4101 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | 4102 MVPP22_PTP_ACTION_CAPTURE; 4103 queue = &port->tx_hwtstamp_queue[0]; 4104 4105 switch (type & PTP_CLASS_VMASK) { 4106 case PTP_CLASS_V1: 4107 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); 4108 break; 4109 4110 case PTP_CLASS_V2: 4111 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); 4112 mtype = hdr->tsmt & 15; 4113 /* Direct PTP Sync messages to queue 1 */ 4114 if (mtype == 0) { 4115 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; 4116 queue = &port->tx_hwtstamp_queue[1]; 4117 } 4118 break; 4119 } 4120 4121 /* Take a reference on the skb and insert into our queue */ 4122 i = queue->next; 4123 queue->next = (i + 1) & 31; 4124 if (queue->skb[i]) 4125 dev_kfree_skb_any(queue->skb[i]); 4126 queue->skb[i] = skb_get(skb); 4127 4128 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); 4129 4130 /* 4131 * 3:0 - PTPAction 4132 * 6:4 - PTPPacketFormat 4133 * 7 - PTP_CF_WraparoundCheckEn 4134 * 9:8 - IngressTimestampSeconds[1:0] 4135 * 10 - Reserved 4136 * 11 - MACTimestampingEn 4137 * 17:12 - PTP_TimestampQueueEntryID[5:0] 4138 * 18 - PTPTimestampQueueSelect 4139 * 19 - UDPChecksumUpdateEn 4140 * 27:20 - TimestampOffset 4141 * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header 4142 * NTPTs, Y.1731 - L3 to timestamp entry 4143 * 35:28 - UDP Checksum Offset 4144 * 4145 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) 4146 */ 4147 tx_desc->pp22.ptp_descriptor &= 4148 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); 4149 tx_desc->pp22.ptp_descriptor |= 4150 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); 4151 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); 4152 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); 4153 4154 return true; 4155 } 4156 4157 /* Handle tx fragmentation processing */ 4158 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, 4159 struct mvpp2_tx_queue *aggr_txq, 4160 struct mvpp2_tx_queue *txq) 4161 { 4162 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4163 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 4164 struct mvpp2_tx_desc *tx_desc; 4165 int i; 4166 dma_addr_t buf_dma_addr; 4167 4168 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4169 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4170 void *addr = skb_frag_address(frag); 4171 4172 tx_desc = 
mvpp2_txq_next_desc_get(aggr_txq); 4173 mvpp2_txdesc_clear_ptp(port, tx_desc); 4174 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4175 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); 4176 4177 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, 4178 skb_frag_size(frag), 4179 DMA_TO_DEVICE); 4180 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { 4181 mvpp2_txq_desc_put(txq); 4182 goto cleanup; 4183 } 4184 4185 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4186 4187 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 4188 /* Last descriptor */ 4189 mvpp2_txdesc_cmd_set(port, tx_desc, 4190 MVPP2_TXD_L_DESC); 4191 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4192 } else { 4193 /* Descriptor in the middle: Not First, Not Last */ 4194 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 4195 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4196 } 4197 } 4198 4199 return 0; 4200 cleanup: 4201 /* Release all descriptors that were used to map fragments of 4202 * this packet, as well as the corresponding DMA mappings 4203 */ 4204 for (i = i - 1; i >= 0; i--) { 4205 tx_desc = txq->descs + i; 4206 tx_desc_unmap_put(port, txq, tx_desc); 4207 } 4208 4209 return -ENOMEM; 4210 } 4211 4212 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, 4213 struct net_device *dev, 4214 struct mvpp2_tx_queue *txq, 4215 struct mvpp2_tx_queue *aggr_txq, 4216 struct mvpp2_txq_pcpu *txq_pcpu, 4217 int hdr_sz) 4218 { 4219 struct mvpp2_port *port = netdev_priv(dev); 4220 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4221 dma_addr_t addr; 4222 4223 mvpp2_txdesc_clear_ptp(port, tx_desc); 4224 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4225 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); 4226 4227 addr = txq_pcpu->tso_headers_dma + 4228 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 4229 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); 4230 4231 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | 4232 MVPP2_TXD_F_DESC | 4233 MVPP2_TXD_PADDING_DISABLE); 4234 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4235 } 4236 4237 static inline int mvpp2_tso_put_data(struct sk_buff *skb, 4238 struct net_device *dev, struct tso_t *tso, 4239 struct mvpp2_tx_queue *txq, 4240 struct mvpp2_tx_queue *aggr_txq, 4241 struct mvpp2_txq_pcpu *txq_pcpu, 4242 int sz, bool left, bool last) 4243 { 4244 struct mvpp2_port *port = netdev_priv(dev); 4245 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4246 dma_addr_t buf_dma_addr; 4247 4248 mvpp2_txdesc_clear_ptp(port, tx_desc); 4249 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4250 mvpp2_txdesc_size_set(port, tx_desc, sz); 4251 4252 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, 4253 DMA_TO_DEVICE); 4254 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 4255 mvpp2_txq_desc_put(txq); 4256 return -ENOMEM; 4257 } 4258 4259 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4260 4261 if (!left) { 4262 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); 4263 if (last) { 4264 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4265 return 0; 4266 } 4267 } else { 4268 mvpp2_txdesc_cmd_set(port, tx_desc, 0); 4269 } 4270 4271 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); 4272 return 0; 4273 } 4274 4275 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, 4276 struct mvpp2_tx_queue *txq, 4277 struct mvpp2_tx_queue *aggr_txq, 4278 struct mvpp2_txq_pcpu *txq_pcpu) 4279 { 4280 struct mvpp2_port *port = 
netdev_priv(dev); 4281 int hdr_sz, i, len, descs = 0; 4282 struct tso_t tso; 4283 4284 /* Check number of available descriptors */ 4285 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || 4286 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 4287 tso_count_descs(skb))) 4288 return 0; 4289 4290 hdr_sz = tso_start(skb, &tso); 4291 4292 len = skb->len - hdr_sz; 4293 while (len > 0) { 4294 int left = min_t(int, skb_shinfo(skb)->gso_size, len); 4295 char *hdr = txq_pcpu->tso_headers + 4296 txq_pcpu->txq_put_index * TSO_HEADER_SIZE; 4297 4298 len -= left; 4299 descs++; 4300 4301 tso_build_hdr(skb, hdr, &tso, left, len == 0); 4302 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); 4303 4304 while (left > 0) { 4305 int sz = min_t(int, tso.size, left); 4306 left -= sz; 4307 descs++; 4308 4309 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, 4310 txq_pcpu, sz, left, len == 0)) 4311 goto release; 4312 tso_build_data(skb, &tso, sz); 4313 } 4314 } 4315 4316 return descs; 4317 4318 release: 4319 for (i = descs - 1; i >= 0; i--) { 4320 struct mvpp2_tx_desc *tx_desc = txq->descs + i; 4321 tx_desc_unmap_put(port, txq, tx_desc); 4322 } 4323 return 0; 4324 } 4325 4326 /* Main tx processing */ 4327 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) 4328 { 4329 struct mvpp2_port *port = netdev_priv(dev); 4330 struct mvpp2_tx_queue *txq, *aggr_txq; 4331 struct mvpp2_txq_pcpu *txq_pcpu; 4332 struct mvpp2_tx_desc *tx_desc; 4333 dma_addr_t buf_dma_addr; 4334 unsigned long flags = 0; 4335 unsigned int thread; 4336 int frags = 0; 4337 u16 txq_id; 4338 u32 tx_cmd; 4339 4340 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4341 4342 txq_id = skb_get_queue_mapping(skb); 4343 txq = port->txqs[txq_id]; 4344 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 4345 aggr_txq = &port->priv->aggr_txqs[thread]; 4346 4347 if (test_bit(thread, &port->priv->lock_map)) 4348 spin_lock_irqsave(&port->tx_lock[thread], flags); 4349 4350 if (skb_is_gso(skb)) { 4351 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); 4352 goto out; 4353 } 4354 frags = skb_shinfo(skb)->nr_frags + 1; 4355 4356 /* Check number of available descriptors */ 4357 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || 4358 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { 4359 frags = 0; 4360 goto out; 4361 } 4362 4363 /* Get a descriptor for the first part of the packet */ 4364 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4365 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || 4366 !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) 4367 mvpp2_txdesc_clear_ptp(port, tx_desc); 4368 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4369 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); 4370 4371 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, 4372 skb_headlen(skb), DMA_TO_DEVICE); 4373 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { 4374 mvpp2_txq_desc_put(txq); 4375 frags = 0; 4376 goto out; 4377 } 4378 4379 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); 4380 4381 tx_cmd = mvpp2_skb_tx_csum(port, skb); 4382 4383 if (frags == 1) { 4384 /* First and Last descriptor */ 4385 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 4386 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 4387 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); 4388 } else { 4389 /* First but not Last */ 4390 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 4391 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 4392 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, 
MVPP2_TYPE_SKB); 4393 4394 /* Continue with other skb fragments */ 4395 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 4396 tx_desc_unmap_put(port, txq, tx_desc); 4397 frags = 0; 4398 } 4399 } 4400 4401 out: 4402 if (frags > 0) { 4403 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); 4404 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); 4405 4406 txq_pcpu->reserved_num -= frags; 4407 txq_pcpu->count += frags; 4408 aggr_txq->count += frags; 4409 4410 /* Enable transmit */ 4411 wmb(); 4412 mvpp2_aggr_txq_pend_desc_add(port, frags); 4413 4414 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 4415 netif_tx_stop_queue(nq); 4416 4417 u64_stats_update_begin(&stats->syncp); 4418 stats->tx_packets++; 4419 stats->tx_bytes += skb->len; 4420 u64_stats_update_end(&stats->syncp); 4421 } else { 4422 dev->stats.tx_dropped++; 4423 dev_kfree_skb_any(skb); 4424 } 4425 4426 /* Finalize TX processing */ 4427 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 4428 mvpp2_txq_done(port, txq, txq_pcpu); 4429 4430 /* Set the timer in case not all frags were processed */ 4431 if (!port->has_tx_irqs && txq_pcpu->count <= frags && 4432 txq_pcpu->count > 0) { 4433 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); 4434 4435 if (!port_pcpu->timer_scheduled) { 4436 port_pcpu->timer_scheduled = true; 4437 hrtimer_start(&port_pcpu->tx_done_timer, 4438 MVPP2_TXDONE_HRTIMER_PERIOD_NS, 4439 HRTIMER_MODE_REL_PINNED_SOFT); 4440 } 4441 } 4442 4443 if (test_bit(thread, &port->priv->lock_map)) 4444 spin_unlock_irqrestore(&port->tx_lock[thread], flags); 4445 4446 return NETDEV_TX_OK; 4447 } 4448 4449 static inline void mvpp2_cause_error(struct net_device *dev, int cause) 4450 { 4451 if (cause & MVPP2_CAUSE_FCS_ERR_MASK) 4452 netdev_err(dev, "FCS error\n"); 4453 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) 4454 netdev_err(dev, "rx fifo overrun error\n"); 4455 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) 4456 netdev_err(dev, "tx fifo underrun error\n"); 4457 } 4458 4459 static int mvpp2_poll(struct napi_struct *napi, int budget) 4460 { 4461 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; 4462 int rx_done = 0; 4463 struct mvpp2_port *port = netdev_priv(napi->dev); 4464 struct mvpp2_queue_vector *qv; 4465 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 4466 4467 qv = container_of(napi, struct mvpp2_queue_vector, napi); 4468 4469 /* Rx/Tx cause register 4470 * 4471 * Bits 0-15: each bit indicates received packets on the Rx queue 4472 * (bit 0 is for Rx queue 0). 4473 * 4474 * Bits 16-23: each bit indicates transmitted packets on the Tx queue 4475 * (bit 16 is for Tx queue 0). 
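	 *
	 * The register is read below through the per-thread window
	 * (mvpp2_thread_read_relaxed() on qv->sw_thread_id).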
4476 * 4477 * Each CPU has its own Rx/Tx cause register 4478 */ 4479 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, 4480 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 4481 4482 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 4483 if (cause_misc) { 4484 mvpp2_cause_error(port->dev, cause_misc); 4485 4486 /* Clear the cause register */ 4487 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); 4488 mvpp2_thread_write(port->priv, thread, 4489 MVPP2_ISR_RX_TX_CAUSE_REG(port->id), 4490 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 4491 } 4492 4493 if (port->has_tx_irqs) { 4494 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 4495 if (cause_tx) { 4496 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 4497 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 4498 } 4499 } 4500 4501 /* Process RX packets */ 4502 cause_rx = cause_rx_tx & 4503 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); 4504 cause_rx <<= qv->first_rxq; 4505 cause_rx |= qv->pending_cause_rx; 4506 while (cause_rx && budget > 0) { 4507 int count; 4508 struct mvpp2_rx_queue *rxq; 4509 4510 rxq = mvpp2_get_rx_queue(port, cause_rx); 4511 if (!rxq) 4512 break; 4513 4514 count = mvpp2_rx(port, napi, budget, rxq); 4515 rx_done += count; 4516 budget -= count; 4517 if (budget > 0) { 4518 /* Clear the bit associated to this Rx queue 4519 * so that next iteration will continue from 4520 * the next Rx queue. 4521 */ 4522 cause_rx &= ~(1 << rxq->logic_rxq); 4523 } 4524 } 4525 4526 if (budget > 0) { 4527 cause_rx = 0; 4528 napi_complete_done(napi, rx_done); 4529 4530 mvpp2_qvec_interrupt_enable(qv); 4531 } 4532 qv->pending_cause_rx = cause_rx; 4533 return rx_done; 4534 } 4535 4536 static void mvpp22_mode_reconfigure(struct mvpp2_port *port, 4537 phy_interface_t interface) 4538 { 4539 u32 ctrl3; 4540 4541 /* Set the GMAC & XLG MAC in reset */ 4542 mvpp2_mac_reset_assert(port); 4543 4544 /* Set the MPCS and XPCS in reset */ 4545 mvpp22_pcs_reset_assert(port); 4546 4547 /* comphy reconfiguration */ 4548 mvpp22_comphy_init(port, interface); 4549 4550 /* gop reconfiguration */ 4551 mvpp22_gop_init(port, interface); 4552 4553 mvpp22_pcs_reset_deassert(port, interface); 4554 4555 if (mvpp2_port_supports_xlg(port)) { 4556 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); 4557 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 4558 4559 if (mvpp2_is_xlg(interface)) 4560 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; 4561 else 4562 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 4563 4564 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); 4565 } 4566 4567 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) 4568 mvpp2_xlg_max_rx_size_set(port); 4569 else 4570 mvpp2_gmac_max_rx_size_set(port); 4571 } 4572 4573 /* Set hw internals when starting port */ 4574 static void mvpp2_start_dev(struct mvpp2_port *port) 4575 { 4576 int i; 4577 4578 mvpp2_txp_max_tx_size_set(port); 4579 4580 for (i = 0; i < port->nqvecs; i++) 4581 napi_enable(&port->qvecs[i].napi); 4582 4583 /* Enable interrupts on all threads */ 4584 mvpp2_interrupts_enable(port); 4585 4586 if (port->priv->hw_version >= MVPP22) 4587 mvpp22_mode_reconfigure(port, port->phy_interface); 4588 4589 if (port->phylink) { 4590 phylink_start(port->phylink); 4591 } else { 4592 mvpp2_acpi_start(port); 4593 } 4594 4595 netif_tx_start_all_queues(port->dev); 4596 4597 clear_bit(0, &port->state); 4598 } 4599 4600 /* Set hw internals when stopping port */ 4601 static void mvpp2_stop_dev(struct mvpp2_port *port) 4602 { 4603 int i; 4604 4605 set_bit(0, &port->state); 4606 4607 
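	/* Bit 0 of port->state marks the port as stopping: mvpp2_xdp_xmit()
	 * tests it and returns -ENETDOWN once it is set. It is cleared again
	 * at the end of mvpp2_start_dev().
	 */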
/* Disable interrupts on all threads */ 4608 mvpp2_interrupts_disable(port); 4609 4610 for (i = 0; i < port->nqvecs; i++) 4611 napi_disable(&port->qvecs[i].napi); 4612 4613 if (port->phylink) 4614 phylink_stop(port->phylink); 4615 phy_power_off(port->comphy); 4616 } 4617 4618 static int mvpp2_check_ringparam_valid(struct net_device *dev, 4619 struct ethtool_ringparam *ring) 4620 { 4621 u16 new_rx_pending = ring->rx_pending; 4622 u16 new_tx_pending = ring->tx_pending; 4623 4624 if (ring->rx_pending == 0 || ring->tx_pending == 0) 4625 return -EINVAL; 4626 4627 if (ring->rx_pending > MVPP2_MAX_RXD_MAX) 4628 new_rx_pending = MVPP2_MAX_RXD_MAX; 4629 else if (ring->rx_pending < MSS_THRESHOLD_START) 4630 new_rx_pending = MSS_THRESHOLD_START; 4631 else if (!IS_ALIGNED(ring->rx_pending, 16)) 4632 new_rx_pending = ALIGN(ring->rx_pending, 16); 4633 4634 if (ring->tx_pending > MVPP2_MAX_TXD_MAX) 4635 new_tx_pending = MVPP2_MAX_TXD_MAX; 4636 else if (!IS_ALIGNED(ring->tx_pending, 32)) 4637 new_tx_pending = ALIGN(ring->tx_pending, 32); 4638 4639 /* The Tx ring size cannot be smaller than the minimum number of 4640 * descriptors needed for TSO. 4641 */ 4642 if (new_tx_pending < MVPP2_MAX_SKB_DESCS) 4643 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); 4644 4645 if (ring->rx_pending != new_rx_pending) { 4646 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", 4647 ring->rx_pending, new_rx_pending); 4648 ring->rx_pending = new_rx_pending; 4649 } 4650 4651 if (ring->tx_pending != new_tx_pending) { 4652 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", 4653 ring->tx_pending, new_tx_pending); 4654 ring->tx_pending = new_tx_pending; 4655 } 4656 4657 return 0; 4658 } 4659 4660 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) 4661 { 4662 u32 mac_addr_l, mac_addr_m, mac_addr_h; 4663 4664 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 4665 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); 4666 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); 4667 addr[0] = (mac_addr_h >> 24) & 0xFF; 4668 addr[1] = (mac_addr_h >> 16) & 0xFF; 4669 addr[2] = (mac_addr_h >> 8) & 0xFF; 4670 addr[3] = mac_addr_h & 0xFF; 4671 addr[4] = mac_addr_m & 0xFF; 4672 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; 4673 } 4674 4675 static int mvpp2_irqs_init(struct mvpp2_port *port) 4676 { 4677 int err, i; 4678 4679 for (i = 0; i < port->nqvecs; i++) { 4680 struct mvpp2_queue_vector *qv = port->qvecs + i; 4681 4682 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 4683 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); 4684 if (!qv->mask) { 4685 err = -ENOMEM; 4686 goto err; 4687 } 4688 4689 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); 4690 } 4691 4692 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); 4693 if (err) 4694 goto err; 4695 4696 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { 4697 unsigned int cpu; 4698 4699 for_each_present_cpu(cpu) { 4700 if (mvpp2_cpu_to_thread(port->priv, cpu) == 4701 qv->sw_thread_id) 4702 cpumask_set_cpu(cpu, qv->mask); 4703 } 4704 4705 irq_set_affinity_hint(qv->irq, qv->mask); 4706 } 4707 } 4708 4709 return 0; 4710 err: 4711 for (i = 0; i < port->nqvecs; i++) { 4712 struct mvpp2_queue_vector *qv = port->qvecs + i; 4713 4714 irq_set_affinity_hint(qv->irq, NULL); 4715 kfree(qv->mask); 4716 qv->mask = NULL; 4717 free_irq(qv->irq, qv); 4718 } 4719 4720 return err; 4721 } 4722 4723 static void mvpp2_irqs_deinit(struct mvpp2_port *port) 4724 { 4725 int i; 4726 4727 for (i = 0; i < port->nqvecs; i++) { 
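		/* Undo everything mvpp2_irqs_init() set up for this vector */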
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}

static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
	return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
		!(port->flags & MVPP2_F_LOOPBACK);
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	if (port->phylink) {
		err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version >= MVPP22 && port->port_irq) {
		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev,
				   "cannot request port link/ptp IRQ %d\n",
				   port->port_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* By default, the link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->port_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no DT or link IRQ\n");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->port_irq)
		free_irq(port->port_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}

static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure the parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}

/* Shut down all the ports, reconfigure the pools as percpu or shared,
 * then bring all ports back up.
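 *
 * (Used when the MTU or the XDP state requires moving between per-CPU
 * and shared buffer pools: see mvpp2_change_mtu() and
 * mvpp2_check_pagepool_dma().)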
4968 */ 4969 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) 4970 { 4971 bool change_percpu = (percpu != priv->percpu_pools); 4972 int numbufs = MVPP2_BM_POOLS_NUM, i; 4973 struct mvpp2_port *port = NULL; 4974 bool status[MVPP2_MAX_PORTS]; 4975 4976 for (i = 0; i < priv->port_count; i++) { 4977 port = priv->port_list[i]; 4978 status[i] = netif_running(port->dev); 4979 if (status[i]) 4980 mvpp2_stop(port->dev); 4981 } 4982 4983 /* nrxqs is the same for all ports */ 4984 if (priv->percpu_pools) 4985 numbufs = port->nrxqs * 2; 4986 4987 if (change_percpu) 4988 mvpp2_bm_pool_update_priv_fc(priv, false); 4989 4990 for (i = 0; i < numbufs; i++) 4991 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); 4992 4993 devm_kfree(port->dev->dev.parent, priv->bm_pools); 4994 priv->percpu_pools = percpu; 4995 mvpp2_bm_init(port->dev->dev.parent, priv); 4996 4997 for (i = 0; i < priv->port_count; i++) { 4998 port = priv->port_list[i]; 4999 if (percpu && port->ntxqs >= num_possible_cpus() * 2) 5000 xdp_set_features_flag(port->dev, 5001 NETDEV_XDP_ACT_BASIC | 5002 NETDEV_XDP_ACT_REDIRECT | 5003 NETDEV_XDP_ACT_NDO_XMIT); 5004 else 5005 xdp_clear_features_flag(port->dev); 5006 5007 mvpp2_swf_bm_pool_init(port); 5008 if (status[i]) 5009 mvpp2_open(port->dev); 5010 } 5011 5012 if (change_percpu) 5013 mvpp2_bm_pool_update_priv_fc(priv, true); 5014 5015 return 0; 5016 } 5017 5018 static int mvpp2_change_mtu(struct net_device *dev, int mtu) 5019 { 5020 struct mvpp2_port *port = netdev_priv(dev); 5021 bool running = netif_running(dev); 5022 struct mvpp2 *priv = port->priv; 5023 int err; 5024 5025 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { 5026 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, 5027 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); 5028 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); 5029 } 5030 5031 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { 5032 netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", 5033 mtu, (int)MVPP2_MAX_RX_BUF_SIZE); 5034 return -EINVAL; 5035 } 5036 5037 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { 5038 if (priv->percpu_pools) { 5039 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); 5040 mvpp2_bm_switch_buffers(priv, false); 5041 } 5042 } else { 5043 bool jumbo = false; 5044 int i; 5045 5046 for (i = 0; i < priv->port_count; i++) 5047 if (priv->port_list[i] != port && 5048 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > 5049 MVPP2_BM_LONG_PKT_SIZE) { 5050 jumbo = true; 5051 break; 5052 } 5053 5054 /* No port is using jumbo frames */ 5055 if (!jumbo) { 5056 dev_info(port->dev->dev.parent, 5057 "all ports have a low MTU, switching to per-cpu buffers"); 5058 mvpp2_bm_switch_buffers(priv, true); 5059 } 5060 } 5061 5062 if (running) 5063 mvpp2_stop_dev(port); 5064 5065 err = mvpp2_bm_update_mtu(dev, mtu); 5066 if (err) { 5067 netdev_err(dev, "failed to change MTU\n"); 5068 /* Reconfigure BM to the original MTU */ 5069 mvpp2_bm_update_mtu(dev, dev->mtu); 5070 } else { 5071 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); 5072 } 5073 5074 if (running) { 5075 mvpp2_start_dev(port); 5076 mvpp2_egress_enable(port); 5077 mvpp2_ingress_enable(port); 5078 } 5079 5080 return err; 5081 } 5082 5083 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) 5084 { 5085 enum dma_data_direction dma_dir = DMA_FROM_DEVICE; 5086 struct mvpp2 *priv = port->priv; 5087 int err = -1, i; 5088 5089 if (!priv->percpu_pools) 5090 return err; 5091 5092 if (!priv->page_pool[0]) 5093 return -ENOMEM; 5094 5095 for (i = 0; i < 
priv->port_count; i++) { 5096 port = priv->port_list[i]; 5097 if (port->xdp_prog) { 5098 dma_dir = DMA_BIDIRECTIONAL; 5099 break; 5100 } 5101 } 5102 5103 /* All pools are equal in terms of DMA direction */ 5104 if (priv->page_pool[0]->p.dma_dir != dma_dir) 5105 err = mvpp2_bm_switch_buffers(priv, true); 5106 5107 return err; 5108 } 5109 5110 static void 5111 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 5112 { 5113 struct mvpp2_port *port = netdev_priv(dev); 5114 unsigned int start; 5115 unsigned int cpu; 5116 5117 for_each_possible_cpu(cpu) { 5118 struct mvpp2_pcpu_stats *cpu_stats; 5119 u64 rx_packets; 5120 u64 rx_bytes; 5121 u64 tx_packets; 5122 u64 tx_bytes; 5123 5124 cpu_stats = per_cpu_ptr(port->stats, cpu); 5125 do { 5126 start = u64_stats_fetch_begin(&cpu_stats->syncp); 5127 rx_packets = cpu_stats->rx_packets; 5128 rx_bytes = cpu_stats->rx_bytes; 5129 tx_packets = cpu_stats->tx_packets; 5130 tx_bytes = cpu_stats->tx_bytes; 5131 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); 5132 5133 stats->rx_packets += rx_packets; 5134 stats->rx_bytes += rx_bytes; 5135 stats->tx_packets += tx_packets; 5136 stats->tx_bytes += tx_bytes; 5137 } 5138 5139 stats->rx_errors = dev->stats.rx_errors; 5140 stats->rx_dropped = dev->stats.rx_dropped; 5141 stats->tx_dropped = dev->stats.tx_dropped; 5142 } 5143 5144 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) 5145 { 5146 struct hwtstamp_config config; 5147 void __iomem *ptp; 5148 u32 gcr, int_mask; 5149 5150 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5151 return -EFAULT; 5152 5153 if (config.tx_type != HWTSTAMP_TX_OFF && 5154 config.tx_type != HWTSTAMP_TX_ON) 5155 return -ERANGE; 5156 5157 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 5158 5159 int_mask = gcr = 0; 5160 if (config.tx_type != HWTSTAMP_TX_OFF) { 5161 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; 5162 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | 5163 MVPP22_PTP_INT_MASK_QUEUE0; 5164 } 5165 5166 /* It seems we must also release the TX reset when enabling the TSU */ 5167 if (config.rx_filter != HWTSTAMP_FILTER_NONE) 5168 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | 5169 MVPP22_PTP_GCR_TX_RESET; 5170 5171 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) 5172 mvpp22_tai_start(port->priv->tai); 5173 5174 if (config.rx_filter != HWTSTAMP_FILTER_NONE) { 5175 config.rx_filter = HWTSTAMP_FILTER_ALL; 5176 mvpp2_modify(ptp + MVPP22_PTP_GCR, 5177 MVPP22_PTP_GCR_RX_RESET | 5178 MVPP22_PTP_GCR_TX_RESET | 5179 MVPP22_PTP_GCR_TSU_ENABLE, gcr); 5180 port->rx_hwtstamp = true; 5181 } else { 5182 port->rx_hwtstamp = false; 5183 mvpp2_modify(ptp + MVPP22_PTP_GCR, 5184 MVPP22_PTP_GCR_RX_RESET | 5185 MVPP22_PTP_GCR_TX_RESET | 5186 MVPP22_PTP_GCR_TSU_ENABLE, gcr); 5187 } 5188 5189 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, 5190 MVPP22_PTP_INT_MASK_QUEUE1 | 5191 MVPP22_PTP_INT_MASK_QUEUE0, int_mask); 5192 5193 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) 5194 mvpp22_tai_stop(port->priv->tai); 5195 5196 port->tx_hwtstamp_type = config.tx_type; 5197 5198 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 5199 return -EFAULT; 5200 5201 return 0; 5202 } 5203 5204 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) 5205 { 5206 struct hwtstamp_config config; 5207 5208 memset(&config, 0, sizeof(config)); 5209 5210 config.tx_type = port->tx_hwtstamp_type; 5211 config.rx_filter = port->rx_hwtstamp ? 
5212 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; 5213 5214 if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) 5215 return -EFAULT; 5216 5217 return 0; 5218 } 5219 5220 static int mvpp2_ethtool_get_ts_info(struct net_device *dev, 5221 struct ethtool_ts_info *info) 5222 { 5223 struct mvpp2_port *port = netdev_priv(dev); 5224 5225 if (!port->hwtstamp) 5226 return -EOPNOTSUPP; 5227 5228 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); 5229 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 5230 SOF_TIMESTAMPING_RX_SOFTWARE | 5231 SOF_TIMESTAMPING_SOFTWARE | 5232 SOF_TIMESTAMPING_TX_HARDWARE | 5233 SOF_TIMESTAMPING_RX_HARDWARE | 5234 SOF_TIMESTAMPING_RAW_HARDWARE; 5235 info->tx_types = BIT(HWTSTAMP_TX_OFF) | 5236 BIT(HWTSTAMP_TX_ON); 5237 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 5238 BIT(HWTSTAMP_FILTER_ALL); 5239 5240 return 0; 5241 } 5242 5243 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5244 { 5245 struct mvpp2_port *port = netdev_priv(dev); 5246 5247 switch (cmd) { 5248 case SIOCSHWTSTAMP: 5249 if (port->hwtstamp) 5250 return mvpp2_set_ts_config(port, ifr); 5251 break; 5252 5253 case SIOCGHWTSTAMP: 5254 if (port->hwtstamp) 5255 return mvpp2_get_ts_config(port, ifr); 5256 break; 5257 } 5258 5259 if (!port->phylink) 5260 return -ENOTSUPP; 5261 5262 return phylink_mii_ioctl(port->phylink, ifr, cmd); 5263 } 5264 5265 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 5266 { 5267 struct mvpp2_port *port = netdev_priv(dev); 5268 int ret; 5269 5270 ret = mvpp2_prs_vid_entry_add(port, vid); 5271 if (ret) 5272 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", 5273 MVPP2_PRS_VLAN_FILT_MAX - 1); 5274 return ret; 5275 } 5276 5277 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 5278 { 5279 struct mvpp2_port *port = netdev_priv(dev); 5280 5281 mvpp2_prs_vid_entry_remove(port, vid); 5282 return 0; 5283 } 5284 5285 static int mvpp2_set_features(struct net_device *dev, 5286 netdev_features_t features) 5287 { 5288 netdev_features_t changed = dev->features ^ features; 5289 struct mvpp2_port *port = netdev_priv(dev); 5290 5291 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 5292 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 5293 mvpp2_prs_vid_enable_filtering(port); 5294 } else { 5295 /* Invalidate all registered VID filters for this 5296 * port 5297 */ 5298 mvpp2_prs_vid_remove_all(port); 5299 5300 mvpp2_prs_vid_disable_filtering(port); 5301 } 5302 } 5303 5304 if (changed & NETIF_F_RXHASH) { 5305 if (features & NETIF_F_RXHASH) 5306 mvpp22_port_rss_enable(port); 5307 else 5308 mvpp22_port_rss_disable(port); 5309 } 5310 5311 return 0; 5312 } 5313 5314 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) 5315 { 5316 struct bpf_prog *prog = bpf->prog, *old_prog; 5317 bool running = netif_running(port->dev); 5318 bool reset = !prog != !port->xdp_prog; 5319 5320 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { 5321 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); 5322 return -EOPNOTSUPP; 5323 } 5324 5325 if (!port->priv->percpu_pools) { 5326 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); 5327 return -EOPNOTSUPP; 5328 } 5329 5330 if (port->ntxqs < num_possible_cpus() * 2) { 5331 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); 5332 return -EOPNOTSUPP; 5333 } 5334 5335 /* device is up and bpf is added/removed, must setup the RX queues */ 5336 if (running && reset) 5337 mvpp2_stop(port->dev); 
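	/* Publish the new program; the RX path reads port->xdp_prog with
	 * READ_ONCE() in mvpp2_rx(), so xchg() is enough to swap it safely.
	 */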
5338 5339 old_prog = xchg(&port->xdp_prog, prog); 5340 if (old_prog) 5341 bpf_prog_put(old_prog); 5342 5343 /* bpf is just replaced, RXQ and MTU are already setup */ 5344 if (!reset) 5345 return 0; 5346 5347 /* device was up, restore the link */ 5348 if (running) 5349 mvpp2_open(port->dev); 5350 5351 /* Check Page Pool DMA Direction */ 5352 mvpp2_check_pagepool_dma(port); 5353 5354 return 0; 5355 } 5356 5357 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) 5358 { 5359 struct mvpp2_port *port = netdev_priv(dev); 5360 5361 switch (xdp->command) { 5362 case XDP_SETUP_PROG: 5363 return mvpp2_xdp_setup(port, xdp); 5364 default: 5365 return -EINVAL; 5366 } 5367 } 5368 5369 /* Ethtool methods */ 5370 5371 static int mvpp2_ethtool_nway_reset(struct net_device *dev) 5372 { 5373 struct mvpp2_port *port = netdev_priv(dev); 5374 5375 if (!port->phylink) 5376 return -ENOTSUPP; 5377 5378 return phylink_ethtool_nway_reset(port->phylink); 5379 } 5380 5381 /* Set interrupt coalescing for ethtools */ 5382 static int 5383 mvpp2_ethtool_set_coalesce(struct net_device *dev, 5384 struct ethtool_coalesce *c, 5385 struct kernel_ethtool_coalesce *kernel_coal, 5386 struct netlink_ext_ack *extack) 5387 { 5388 struct mvpp2_port *port = netdev_priv(dev); 5389 int queue; 5390 5391 for (queue = 0; queue < port->nrxqs; queue++) { 5392 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 5393 5394 rxq->time_coal = c->rx_coalesce_usecs; 5395 rxq->pkts_coal = c->rx_max_coalesced_frames; 5396 mvpp2_rx_pkts_coal_set(port, rxq); 5397 mvpp2_rx_time_coal_set(port, rxq); 5398 } 5399 5400 if (port->has_tx_irqs) { 5401 port->tx_time_coal = c->tx_coalesce_usecs; 5402 mvpp2_tx_time_coal_set(port); 5403 } 5404 5405 for (queue = 0; queue < port->ntxqs; queue++) { 5406 struct mvpp2_tx_queue *txq = port->txqs[queue]; 5407 5408 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5409 5410 if (port->has_tx_irqs) 5411 mvpp2_tx_pkts_coal_set(port, txq); 5412 } 5413 5414 return 0; 5415 } 5416 5417 /* get coalescing for ethtools */ 5418 static int 5419 mvpp2_ethtool_get_coalesce(struct net_device *dev, 5420 struct ethtool_coalesce *c, 5421 struct kernel_ethtool_coalesce *kernel_coal, 5422 struct netlink_ext_ack *extack) 5423 { 5424 struct mvpp2_port *port = netdev_priv(dev); 5425 5426 c->rx_coalesce_usecs = port->rxqs[0]->time_coal; 5427 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; 5428 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; 5429 c->tx_coalesce_usecs = port->tx_time_coal; 5430 return 0; 5431 } 5432 5433 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, 5434 struct ethtool_drvinfo *drvinfo) 5435 { 5436 strscpy(drvinfo->driver, MVPP2_DRIVER_NAME, 5437 sizeof(drvinfo->driver)); 5438 strscpy(drvinfo->version, MVPP2_DRIVER_VERSION, 5439 sizeof(drvinfo->version)); 5440 strscpy(drvinfo->bus_info, dev_name(&dev->dev), 5441 sizeof(drvinfo->bus_info)); 5442 } 5443 5444 static void 5445 mvpp2_ethtool_get_ringparam(struct net_device *dev, 5446 struct ethtool_ringparam *ring, 5447 struct kernel_ethtool_ringparam *kernel_ring, 5448 struct netlink_ext_ack *extack) 5449 { 5450 struct mvpp2_port *port = netdev_priv(dev); 5451 5452 ring->rx_max_pending = MVPP2_MAX_RXD_MAX; 5453 ring->tx_max_pending = MVPP2_MAX_TXD_MAX; 5454 ring->rx_pending = port->rx_ring_size; 5455 ring->tx_pending = port->tx_ring_size; 5456 } 5457 5458 static int 5459 mvpp2_ethtool_set_ringparam(struct net_device *dev, 5460 struct ethtool_ringparam *ring, 5461 struct kernel_ethtool_ringparam *kernel_ring, 5462 struct netlink_ext_ack 
*extack) 5463 { 5464 struct mvpp2_port *port = netdev_priv(dev); 5465 u16 prev_rx_ring_size = port->rx_ring_size; 5466 u16 prev_tx_ring_size = port->tx_ring_size; 5467 int err; 5468 5469 err = mvpp2_check_ringparam_valid(dev, ring); 5470 if (err) 5471 return err; 5472 5473 if (!netif_running(dev)) { 5474 port->rx_ring_size = ring->rx_pending; 5475 port->tx_ring_size = ring->tx_pending; 5476 return 0; 5477 } 5478 5479 /* The interface is running, so we have to force a 5480 * reallocation of the queues 5481 */ 5482 mvpp2_stop_dev(port); 5483 mvpp2_cleanup_rxqs(port); 5484 mvpp2_cleanup_txqs(port); 5485 5486 port->rx_ring_size = ring->rx_pending; 5487 port->tx_ring_size = ring->tx_pending; 5488 5489 err = mvpp2_setup_rxqs(port); 5490 if (err) { 5491 /* Reallocate Rx queues with the original ring size */ 5492 port->rx_ring_size = prev_rx_ring_size; 5493 ring->rx_pending = prev_rx_ring_size; 5494 err = mvpp2_setup_rxqs(port); 5495 if (err) 5496 goto err_out; 5497 } 5498 err = mvpp2_setup_txqs(port); 5499 if (err) { 5500 /* Reallocate Tx queues with the original ring size */ 5501 port->tx_ring_size = prev_tx_ring_size; 5502 ring->tx_pending = prev_tx_ring_size; 5503 err = mvpp2_setup_txqs(port); 5504 if (err) 5505 goto err_clean_rxqs; 5506 } 5507 5508 mvpp2_start_dev(port); 5509 mvpp2_egress_enable(port); 5510 mvpp2_ingress_enable(port); 5511 5512 return 0; 5513 5514 err_clean_rxqs: 5515 mvpp2_cleanup_rxqs(port); 5516 err_out: 5517 netdev_err(dev, "failed to change ring parameters"); 5518 return err; 5519 } 5520 5521 static void mvpp2_ethtool_get_pause_param(struct net_device *dev, 5522 struct ethtool_pauseparam *pause) 5523 { 5524 struct mvpp2_port *port = netdev_priv(dev); 5525 5526 if (!port->phylink) 5527 return; 5528 5529 phylink_ethtool_get_pauseparam(port->phylink, pause); 5530 } 5531 5532 static int mvpp2_ethtool_set_pause_param(struct net_device *dev, 5533 struct ethtool_pauseparam *pause) 5534 { 5535 struct mvpp2_port *port = netdev_priv(dev); 5536 5537 if (!port->phylink) 5538 return -ENOTSUPP; 5539 5540 return phylink_ethtool_set_pauseparam(port->phylink, pause); 5541 } 5542 5543 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, 5544 struct ethtool_link_ksettings *cmd) 5545 { 5546 struct mvpp2_port *port = netdev_priv(dev); 5547 5548 if (!port->phylink) 5549 return -ENOTSUPP; 5550 5551 return phylink_ethtool_ksettings_get(port->phylink, cmd); 5552 } 5553 5554 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, 5555 const struct ethtool_link_ksettings *cmd) 5556 { 5557 struct mvpp2_port *port = netdev_priv(dev); 5558 5559 if (!port->phylink) 5560 return -ENOTSUPP; 5561 5562 return phylink_ethtool_ksettings_set(port->phylink, cmd); 5563 } 5564 5565 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, 5566 struct ethtool_rxnfc *info, u32 *rules) 5567 { 5568 struct mvpp2_port *port = netdev_priv(dev); 5569 int ret = 0, i, loc = 0; 5570 5571 if (!mvpp22_rss_is_supported(port)) 5572 return -EOPNOTSUPP; 5573 5574 switch (info->cmd) { 5575 case ETHTOOL_GRXFH: 5576 ret = mvpp2_ethtool_rxfh_get(port, info); 5577 break; 5578 case ETHTOOL_GRXRINGS: 5579 info->data = port->nrxqs; 5580 break; 5581 case ETHTOOL_GRXCLSRLCNT: 5582 info->rule_cnt = port->n_rfs_rules; 5583 break; 5584 case ETHTOOL_GRXCLSRULE: 5585 ret = mvpp2_ethtool_cls_rule_get(port, info); 5586 break; 5587 case ETHTOOL_GRXCLSRLALL: 5588 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { 5589 if (port->rfs_rules[i]) 5590 rules[loc++] = i; 5591 } 5592 break; 5593 default: 5594 return 
-ENOTSUPP; 5595 } 5596 5597 return ret; 5598 } 5599 5600 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, 5601 struct ethtool_rxnfc *info) 5602 { 5603 struct mvpp2_port *port = netdev_priv(dev); 5604 int ret = 0; 5605 5606 if (!mvpp22_rss_is_supported(port)) 5607 return -EOPNOTSUPP; 5608 5609 switch (info->cmd) { 5610 case ETHTOOL_SRXFH: 5611 ret = mvpp2_ethtool_rxfh_set(port, info); 5612 break; 5613 case ETHTOOL_SRXCLSRLINS: 5614 ret = mvpp2_ethtool_cls_rule_ins(port, info); 5615 break; 5616 case ETHTOOL_SRXCLSRLDEL: 5617 ret = mvpp2_ethtool_cls_rule_del(port, info); 5618 break; 5619 default: 5620 return -EOPNOTSUPP; 5621 } 5622 return ret; 5623 } 5624 5625 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) 5626 { 5627 struct mvpp2_port *port = netdev_priv(dev); 5628 5629 return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0; 5630 } 5631 5632 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, 5633 u8 *hfunc) 5634 { 5635 struct mvpp2_port *port = netdev_priv(dev); 5636 int ret = 0; 5637 5638 if (!mvpp22_rss_is_supported(port)) 5639 return -EOPNOTSUPP; 5640 5641 if (indir) 5642 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); 5643 5644 if (hfunc) 5645 *hfunc = ETH_RSS_HASH_CRC32; 5646 5647 return ret; 5648 } 5649 5650 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, 5651 const u8 *key, const u8 hfunc) 5652 { 5653 struct mvpp2_port *port = netdev_priv(dev); 5654 int ret = 0; 5655 5656 if (!mvpp22_rss_is_supported(port)) 5657 return -EOPNOTSUPP; 5658 5659 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) 5660 return -EOPNOTSUPP; 5661 5662 if (key) 5663 return -EOPNOTSUPP; 5664 5665 if (indir) 5666 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); 5667 5668 return ret; 5669 } 5670 5671 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, 5672 u8 *key, u8 *hfunc, u32 rss_context) 5673 { 5674 struct mvpp2_port *port = netdev_priv(dev); 5675 int ret = 0; 5676 5677 if (!mvpp22_rss_is_supported(port)) 5678 return -EOPNOTSUPP; 5679 if (rss_context >= MVPP22_N_RSS_TABLES) 5680 return -EINVAL; 5681 5682 if (hfunc) 5683 *hfunc = ETH_RSS_HASH_CRC32; 5684 5685 if (indir) 5686 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); 5687 5688 return ret; 5689 } 5690 5691 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, 5692 const u32 *indir, const u8 *key, 5693 const u8 hfunc, u32 *rss_context, 5694 bool delete) 5695 { 5696 struct mvpp2_port *port = netdev_priv(dev); 5697 int ret; 5698 5699 if (!mvpp22_rss_is_supported(port)) 5700 return -EOPNOTSUPP; 5701 5702 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) 5703 return -EOPNOTSUPP; 5704 5705 if (key) 5706 return -EOPNOTSUPP; 5707 5708 if (delete) 5709 return mvpp22_port_rss_ctx_delete(port, *rss_context); 5710 5711 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { 5712 ret = mvpp22_port_rss_ctx_create(port, rss_context); 5713 if (ret) 5714 return ret; 5715 } 5716 5717 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); 5718 } 5719 /* Device ops */ 5720 5721 static const struct net_device_ops mvpp2_netdev_ops = { 5722 .ndo_open = mvpp2_open, 5723 .ndo_stop = mvpp2_stop, 5724 .ndo_start_xmit = mvpp2_tx, 5725 .ndo_set_rx_mode = mvpp2_set_rx_mode, 5726 .ndo_set_mac_address = mvpp2_set_mac_address, 5727 .ndo_change_mtu = mvpp2_change_mtu, 5728 .ndo_get_stats64 = mvpp2_get_stats64, 5729 .ndo_eth_ioctl = mvpp2_ioctl, 5730 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, 
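	/* VLAN CTAG filtering is handled by the header parser
	 * (mvpp2_prs_vid_entry_add()/mvpp2_prs_vid_entry_remove())
	 */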
5731 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, 5732 .ndo_set_features = mvpp2_set_features, 5733 .ndo_bpf = mvpp2_xdp, 5734 .ndo_xdp_xmit = mvpp2_xdp_xmit, 5735 }; 5736 5737 static const struct ethtool_ops mvpp2_eth_tool_ops = { 5738 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5739 ETHTOOL_COALESCE_MAX_FRAMES, 5740 .nway_reset = mvpp2_ethtool_nway_reset, 5741 .get_link = ethtool_op_get_link, 5742 .get_ts_info = mvpp2_ethtool_get_ts_info, 5743 .set_coalesce = mvpp2_ethtool_set_coalesce, 5744 .get_coalesce = mvpp2_ethtool_get_coalesce, 5745 .get_drvinfo = mvpp2_ethtool_get_drvinfo, 5746 .get_ringparam = mvpp2_ethtool_get_ringparam, 5747 .set_ringparam = mvpp2_ethtool_set_ringparam, 5748 .get_strings = mvpp2_ethtool_get_strings, 5749 .get_ethtool_stats = mvpp2_ethtool_get_stats, 5750 .get_sset_count = mvpp2_ethtool_get_sset_count, 5751 .get_pauseparam = mvpp2_ethtool_get_pause_param, 5752 .set_pauseparam = mvpp2_ethtool_set_pause_param, 5753 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, 5754 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, 5755 .get_rxnfc = mvpp2_ethtool_get_rxnfc, 5756 .set_rxnfc = mvpp2_ethtool_set_rxnfc, 5757 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, 5758 .get_rxfh = mvpp2_ethtool_get_rxfh, 5759 .set_rxfh = mvpp2_ethtool_set_rxfh, 5760 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, 5761 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, 5762 }; 5763 5764 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that 5765 * had a single IRQ defined per-port. 5766 */ 5767 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, 5768 struct device_node *port_node) 5769 { 5770 struct mvpp2_queue_vector *v = &port->qvecs[0]; 5771 5772 v->first_rxq = 0; 5773 v->nrxqs = port->nrxqs; 5774 v->type = MVPP2_QUEUE_VECTOR_SHARED; 5775 v->sw_thread_id = 0; 5776 v->sw_thread_mask = *cpumask_bits(cpu_online_mask); 5777 v->port = port; 5778 v->irq = irq_of_parse_and_map(port_node, 0); 5779 if (v->irq <= 0) 5780 return -EINVAL; 5781 netif_napi_add(port->dev, &v->napi, mvpp2_poll); 5782 5783 port->nqvecs = 1; 5784 5785 return 0; 5786 } 5787 5788 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, 5789 struct device_node *port_node) 5790 { 5791 struct mvpp2 *priv = port->priv; 5792 struct mvpp2_queue_vector *v; 5793 int i, ret; 5794 5795 switch (queue_mode) { 5796 case MVPP2_QDIST_SINGLE_MODE: 5797 port->nqvecs = priv->nthreads + 1; 5798 break; 5799 case MVPP2_QDIST_MULTI_MODE: 5800 port->nqvecs = priv->nthreads; 5801 break; 5802 } 5803 5804 for (i = 0; i < port->nqvecs; i++) { 5805 char irqname[16]; 5806 5807 v = port->qvecs + i; 5808 5809 v->port = port; 5810 v->type = MVPP2_QUEUE_VECTOR_PRIVATE; 5811 v->sw_thread_id = i; 5812 v->sw_thread_mask = BIT(i); 5813 5814 if (port->flags & MVPP2_F_DT_COMPAT) 5815 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); 5816 else 5817 snprintf(irqname, sizeof(irqname), "hif%d", i); 5818 5819 if (queue_mode == MVPP2_QDIST_MULTI_MODE) { 5820 v->first_rxq = i; 5821 v->nrxqs = 1; 5822 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && 5823 i == (port->nqvecs - 1)) { 5824 v->first_rxq = 0; 5825 v->nrxqs = port->nrxqs; 5826 v->type = MVPP2_QUEUE_VECTOR_SHARED; 5827 5828 if (port->flags & MVPP2_F_DT_COMPAT) 5829 strncpy(irqname, "rx-shared", sizeof(irqname)); 5830 } 5831 5832 if (port_node) 5833 v->irq = of_irq_get_byname(port_node, irqname); 5834 else 5835 v->irq = fwnode_irq_get(port->fwnode, i); 5836 if (v->irq <= 0) { 5837 ret = -EINVAL; 5838 goto err; 5839 } 5840 5841 
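/* One NAPI context is registered per queue vector; all of them are serviced by mvpp2_poll() */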
netif_napi_add(port->dev, &v->napi, mvpp2_poll); 5842 } 5843 5844 return 0; 5845 5846 err: 5847 for (i = 0; i < port->nqvecs; i++) 5848 irq_dispose_mapping(port->qvecs[i].irq); 5849 return ret; 5850 } 5851 5852 static int mvpp2_queue_vectors_init(struct mvpp2_port *port, 5853 struct device_node *port_node) 5854 { 5855 if (port->has_tx_irqs) 5856 return mvpp2_multi_queue_vectors_init(port, port_node); 5857 else 5858 return mvpp2_simple_queue_vectors_init(port, port_node); 5859 } 5860 5861 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) 5862 { 5863 int i; 5864 5865 for (i = 0; i < port->nqvecs; i++) 5866 irq_dispose_mapping(port->qvecs[i].irq); 5867 } 5868 5869 /* Configure Rx queue group interrupt for this port */ 5870 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) 5871 { 5872 struct mvpp2 *priv = port->priv; 5873 u32 val; 5874 int i; 5875 5876 if (priv->hw_version == MVPP21) { 5877 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 5878 port->nrxqs); 5879 return; 5880 } 5881 5882 /* Handle the more complicated PPv2.2 and PPv2.3 case */ 5883 for (i = 0; i < port->nqvecs; i++) { 5884 struct mvpp2_queue_vector *qv = port->qvecs + i; 5885 5886 if (!qv->nrxqs) 5887 continue; 5888 5889 val = qv->sw_thread_id; 5890 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; 5891 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 5892 5893 val = qv->first_rxq; 5894 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; 5895 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 5896 } 5897 } 5898 5899 /* Initialize port HW */ 5900 static int mvpp2_port_init(struct mvpp2_port *port) 5901 { 5902 struct device *dev = port->dev->dev.parent; 5903 struct mvpp2 *priv = port->priv; 5904 struct mvpp2_txq_pcpu *txq_pcpu; 5905 unsigned int thread; 5906 int queue, err, val; 5907 5908 /* Checks for hardware constraints */ 5909 if (port->first_rxq + port->nrxqs > 5910 MVPP2_MAX_PORTS * priv->max_port_rxqs) 5911 return -EINVAL; 5912 5913 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) 5914 return -EINVAL; 5915 5916 /* Disable port */ 5917 mvpp2_egress_disable(port); 5918 mvpp2_port_disable(port); 5919 5920 if (mvpp2_is_xlg(port->phy_interface)) { 5921 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 5922 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 5923 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 5924 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 5925 } else { 5926 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5927 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 5928 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 5929 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5930 } 5931 5932 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; 5933 5934 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), 5935 GFP_KERNEL); 5936 if (!port->txqs) 5937 return -ENOMEM; 5938 5939 /* Associate physical Tx queues to this port and initialize. 5940 * The mapping is predefined. 
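* (The physical queue id comes from mvpp2_txq_phys(), defined earlier in this driver. Assuming it keeps the usual port * MVPP2_MAX_TXQ + queue layout, each port owns a fixed contiguous block of physical queues; illustratively, logical queue 2 of port 1 maps to physical queue 10 when MVPP2_MAX_TXQ is 8.)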
5941 */ 5942 for (queue = 0; queue < port->ntxqs; queue++) { 5943 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 5944 struct mvpp2_tx_queue *txq; 5945 5946 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 5947 if (!txq) { 5948 err = -ENOMEM; 5949 goto err_free_percpu; 5950 } 5951 5952 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); 5953 if (!txq->pcpu) { 5954 err = -ENOMEM; 5955 goto err_free_percpu; 5956 } 5957 5958 txq->id = queue_phy_id; 5959 txq->log_id = queue; 5960 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 5961 for (thread = 0; thread < priv->nthreads; thread++) { 5962 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 5963 txq_pcpu->thread = thread; 5964 } 5965 5966 port->txqs[queue] = txq; 5967 } 5968 5969 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), 5970 GFP_KERNEL); 5971 if (!port->rxqs) { 5972 err = -ENOMEM; 5973 goto err_free_percpu; 5974 } 5975 5976 /* Allocate and initialize Rx queue for this port */ 5977 for (queue = 0; queue < port->nrxqs; queue++) { 5978 struct mvpp2_rx_queue *rxq; 5979 5980 /* Map physical Rx queue to port's logical Rx queue */ 5981 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 5982 if (!rxq) { 5983 err = -ENOMEM; 5984 goto err_free_percpu; 5985 } 5986 /* Map this Rx queue to a physical queue */ 5987 rxq->id = port->first_rxq + queue; 5988 rxq->port = port->id; 5989 rxq->logic_rxq = queue; 5990 5991 port->rxqs[queue] = rxq; 5992 } 5993 5994 mvpp2_rx_irqs_setup(port); 5995 5996 /* Create Rx descriptor rings */ 5997 for (queue = 0; queue < port->nrxqs; queue++) { 5998 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 5999 6000 rxq->size = port->rx_ring_size; 6001 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 6002 rxq->time_coal = MVPP2_RX_COAL_USEC; 6003 } 6004 6005 mvpp2_ingress_disable(port); 6006 6007 /* Port default configuration */ 6008 mvpp2_defaults_set(port); 6009 6010 /* Port's classifier configuration */ 6011 mvpp2_cls_oversize_rxq_set(port); 6012 mvpp2_cls_port_config(port); 6013 6014 if (mvpp22_rss_is_supported(port)) 6015 mvpp22_port_rss_init(port); 6016 6017 /* Provide an initial Rx packet size */ 6018 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); 6019 6020 /* Initialize pools for swf */ 6021 err = mvpp2_swf_bm_pool_init(port); 6022 if (err) 6023 goto err_free_percpu; 6024 6025 /* Clear all port stats */ 6026 mvpp2_read_stats(port); 6027 memset(port->ethtool_stats, 0, 6028 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); 6029 6030 return 0; 6031 6032 err_free_percpu: 6033 for (queue = 0; queue < port->ntxqs; queue++) { 6034 if (!port->txqs[queue]) 6035 continue; 6036 free_percpu(port->txqs[queue]->pcpu); 6037 } 6038 return err; 6039 } 6040 6041 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, 6042 unsigned long *flags) 6043 { 6044 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", 6045 "tx-cpu3" }; 6046 int i; 6047 6048 for (i = 0; i < 5; i++) 6049 if (of_property_match_string(port_node, "interrupt-names", 6050 irqs[i]) < 0) 6051 return false; 6052 6053 *flags |= MVPP2_F_DT_COMPAT; 6054 return true; 6055 } 6056 6057 /* Checks if the port dt description has the required Tx interrupts: 6058 * - PPv2.1: there are no such interrupts. 6059 * - PPv2.2 and PPv2.3: 6060 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] 6061 * - The new ones have: "hifX" with X in [0..8] 6062 * 6063 * All those variants are supported to keep the backward compatibility. 
6064 */ 6065 static bool mvpp2_port_has_irqs(struct mvpp2 *priv, 6066 struct device_node *port_node, 6067 unsigned long *flags) 6068 { 6069 char name[5]; 6070 int i; 6071 6072 /* ACPI */ 6073 if (!port_node) 6074 return true; 6075 6076 if (priv->hw_version == MVPP21) 6077 return false; 6078 6079 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) 6080 return true; 6081 6082 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 6083 snprintf(name, 5, "hif%d", i); 6084 if (of_property_match_string(port_node, "interrupt-names", 6085 name) < 0) 6086 return false; 6087 } 6088 6089 return true; 6090 } 6091 6092 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, 6093 struct fwnode_handle *fwnode, 6094 char **mac_from) 6095 { 6096 struct mvpp2_port *port = netdev_priv(dev); 6097 char hw_mac_addr[ETH_ALEN] = {0}; 6098 char fw_mac_addr[ETH_ALEN]; 6099 6100 if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { 6101 *mac_from = "firmware node"; 6102 eth_hw_addr_set(dev, fw_mac_addr); 6103 return; 6104 } 6105 6106 if (priv->hw_version == MVPP21) { 6107 mvpp21_get_mac_address(port, hw_mac_addr); 6108 if (is_valid_ether_addr(hw_mac_addr)) { 6109 *mac_from = "hardware"; 6110 eth_hw_addr_set(dev, hw_mac_addr); 6111 return; 6112 } 6113 } 6114 6115 /* Only valid on OF enabled platforms */ 6116 if (!of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr)) { 6117 *mac_from = "nvmem cell"; 6118 eth_hw_addr_set(dev, fw_mac_addr); 6119 return; 6120 } 6121 6122 *mac_from = "random"; 6123 eth_hw_addr_random(dev); 6124 } 6125 6126 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) 6127 { 6128 return container_of(config, struct mvpp2_port, phylink_config); 6129 } 6130 6131 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) 6132 { 6133 return container_of(pcs, struct mvpp2_port, pcs_xlg); 6134 } 6135 6136 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) 6137 { 6138 return container_of(pcs, struct mvpp2_port, pcs_gmac); 6139 } 6140 6141 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, 6142 struct phylink_link_state *state) 6143 { 6144 struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); 6145 u32 val; 6146 6147 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) 6148 state->speed = SPEED_5000; 6149 else 6150 state->speed = SPEED_10000; 6151 state->duplex = 1; 6152 state->an_complete = 1; 6153 6154 val = readl(port->base + MVPP22_XLG_STATUS); 6155 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); 6156 6157 state->pause = 0; 6158 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6159 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) 6160 state->pause |= MLO_PAUSE_TX; 6161 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) 6162 state->pause |= MLO_PAUSE_RX; 6163 } 6164 6165 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, 6166 unsigned int mode, 6167 phy_interface_t interface, 6168 const unsigned long *advertising, 6169 bool permit_pause_to_mac) 6170 { 6171 return 0; 6172 } 6173 6174 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { 6175 .pcs_get_state = mvpp2_xlg_pcs_get_state, 6176 .pcs_config = mvpp2_xlg_pcs_config, 6177 }; 6178 6179 static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, 6180 unsigned long *supported, 6181 const struct phylink_link_state *state) 6182 { 6183 /* When in 802.3z mode, we must have AN enabled: 6184 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 6185 * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 
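* In other words, an 802.3z (1000BASE-X/2500BASE-X) interface is only usable when Autoneg is present in the advertising mask, which is what the check below enforces.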
6186 */ 6187 if (phy_interface_mode_is_8023z(state->interface) && 6188 !phylink_test(state->advertising, Autoneg)) 6189 return -EINVAL; 6190 6191 return 0; 6192 } 6193 6194 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, 6195 struct phylink_link_state *state) 6196 { 6197 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); 6198 u32 val; 6199 6200 val = readl(port->base + MVPP2_GMAC_STATUS0); 6201 6202 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); 6203 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); 6204 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); 6205 6206 switch (port->phy_interface) { 6207 case PHY_INTERFACE_MODE_1000BASEX: 6208 state->speed = SPEED_1000; 6209 break; 6210 case PHY_INTERFACE_MODE_2500BASEX: 6211 state->speed = SPEED_2500; 6212 break; 6213 default: 6214 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) 6215 state->speed = SPEED_1000; 6216 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) 6217 state->speed = SPEED_100; 6218 else 6219 state->speed = SPEED_10; 6220 } 6221 6222 state->pause = 0; 6223 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) 6224 state->pause |= MLO_PAUSE_RX; 6225 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) 6226 state->pause |= MLO_PAUSE_TX; 6227 } 6228 6229 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, 6230 phy_interface_t interface, 6231 const unsigned long *advertising, 6232 bool permit_pause_to_mac) 6233 { 6234 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); 6235 u32 mask, val, an, old_an, changed; 6236 6237 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | 6238 MVPP2_GMAC_IN_BAND_AUTONEG | 6239 MVPP2_GMAC_AN_SPEED_EN | 6240 MVPP2_GMAC_FLOW_CTRL_AUTONEG | 6241 MVPP2_GMAC_AN_DUPLEX_EN; 6242 6243 if (phylink_autoneg_inband(mode)) { 6244 mask |= MVPP2_GMAC_CONFIG_MII_SPEED | 6245 MVPP2_GMAC_CONFIG_GMII_SPEED | 6246 MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6247 val = MVPP2_GMAC_IN_BAND_AUTONEG; 6248 6249 if (interface == PHY_INTERFACE_MODE_SGMII) { 6250 /* SGMII mode receives the speed and duplex from PHY */ 6251 val |= MVPP2_GMAC_AN_SPEED_EN | 6252 MVPP2_GMAC_AN_DUPLEX_EN; 6253 } else { 6254 /* 802.3z mode has fixed speed and duplex */ 6255 val |= MVPP2_GMAC_CONFIG_GMII_SPEED | 6256 MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6257 6258 /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC 6259 * pause modes are controlled automatically by the hardware 6260 * or manually via the bits in MVPP22_GMAC_CTRL_4_REG.
6261 */ 6262 if (permit_pause_to_mac) 6263 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; 6264 6265 /* Configure advertisement bits */ 6266 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; 6267 if (phylink_test(advertising, Pause)) 6268 val |= MVPP2_GMAC_FC_ADV_EN; 6269 if (phylink_test(advertising, Asym_Pause)) 6270 val |= MVPP2_GMAC_FC_ADV_ASM_EN; 6271 } 6272 } else { 6273 val = 0; 6274 } 6275 6276 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6277 an = (an & ~mask) | val; 6278 changed = an ^ old_an; 6279 if (changed) 6280 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6281 6282 /* We are only interested in the advertisement bits changing */ 6283 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); 6284 } 6285 6286 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) 6287 { 6288 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); 6289 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6290 6291 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, 6292 port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6293 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, 6294 port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6295 } 6296 6297 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { 6298 .pcs_validate = mvpp2_gmac_pcs_validate, 6299 .pcs_get_state = mvpp2_gmac_pcs_get_state, 6300 .pcs_config = mvpp2_gmac_pcs_config, 6301 .pcs_an_restart = mvpp2_gmac_pcs_an_restart, 6302 }; 6303 6304 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, 6305 const struct phylink_link_state *state) 6306 { 6307 u32 val; 6308 6309 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6310 MVPP22_XLG_CTRL0_MAC_RESET_DIS, 6311 MVPP22_XLG_CTRL0_MAC_RESET_DIS); 6312 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, 6313 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | 6314 MVPP22_XLG_CTRL4_EN_IDLE_CHECK | 6315 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, 6316 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); 6317 6318 /* Wait for reset to deassert */ 6319 do { 6320 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6321 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); 6322 } 6323 6324 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, 6325 const struct phylink_link_state *state) 6326 { 6327 u32 old_ctrl0, ctrl0; 6328 u32 old_ctrl2, ctrl2; 6329 u32 old_ctrl4, ctrl4; 6330 6331 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 6332 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 6333 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 6334 6335 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; 6336 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK); 6337 6338 /* Configure port type */ 6339 if (phy_interface_mode_is_8023z(state->interface)) { 6340 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; 6341 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 6342 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 6343 MVPP22_CTRL4_DP_CLK_SEL | 6344 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6345 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 6346 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; 6347 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 6348 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 6349 MVPP22_CTRL4_DP_CLK_SEL | 6350 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6351 } else if (phy_interface_mode_is_rgmii(state->interface)) { 6352 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; 6353 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | 6354 MVPP22_CTRL4_SYNC_BYPASS_DIS | 6355 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6356 } 6357 6358 /* Configure 
negotiation style */ 6359 if (!phylink_autoneg_inband(mode)) { 6360 /* Phy or fixed speed - no in-band AN, nothing to do, leave the 6361 * configured speed, duplex and flow control as-is. 6362 */ 6363 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 6364 /* SGMII in-band mode receives the speed and duplex from 6365 * the PHY. Flow control information is not received. */ 6366 } else if (phy_interface_mode_is_8023z(state->interface)) { 6367 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can 6368 * they negotiate duplex: they are always operating with a fixed 6369 * speed of 1000/2500Mbps in full duplex, so force 1000/2500 6370 * speed and full duplex here. 6371 */ 6372 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; 6373 } 6374 6375 if (old_ctrl0 != ctrl0) 6376 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); 6377 if (old_ctrl2 != ctrl2) 6378 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); 6379 if (old_ctrl4 != ctrl4) 6380 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); 6381 } 6382 6383 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, 6384 phy_interface_t interface) 6385 { 6386 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6387 6388 /* Select the appropriate PCS operations depending on the 6389 * configured interface mode. We will only switch to a mode 6390 * that the validate() checks have already passed. 6391 */ 6392 if (mvpp2_is_xlg(interface)) 6393 return &port->pcs_xlg; 6394 else 6395 return &port->pcs_gmac; 6396 } 6397 6398 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, 6399 phy_interface_t interface) 6400 { 6401 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6402 6403 /* Check for invalid configuration */ 6404 if (mvpp2_is_xlg(interface) && port->gop_id != 0) { 6405 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); 6406 return -EINVAL; 6407 } 6408 6409 if (port->phy_interface != interface || 6410 phylink_autoneg_inband(mode)) { 6411 /* Force the link down when changing the interface or if in 6412 * in-band mode to ensure we do not change the configuration 6413 * while the hardware is indicating link is up. We force both 6414 * XLG and GMAC down to ensure that they're both in a known 6415 * state. 
6416 */ 6417 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6418 MVPP2_GMAC_FORCE_LINK_PASS | 6419 MVPP2_GMAC_FORCE_LINK_DOWN, 6420 MVPP2_GMAC_FORCE_LINK_DOWN); 6421 6422 if (mvpp2_port_supports_xlg(port)) 6423 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6424 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6425 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 6426 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); 6427 } 6428 6429 /* Make sure the port is disabled when reconfiguring the mode */ 6430 mvpp2_port_disable(port); 6431 6432 if (port->phy_interface != interface) { 6433 /* Place GMAC into reset */ 6434 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6435 MVPP2_GMAC_PORT_RESET_MASK, 6436 MVPP2_GMAC_PORT_RESET_MASK); 6437 6438 if (port->priv->hw_version >= MVPP22) { 6439 mvpp22_gop_mask_irq(port); 6440 6441 phy_power_off(port->comphy); 6442 6443 /* Reconfigure the serdes lanes */ 6444 mvpp22_mode_reconfigure(port, interface); 6445 } 6446 } 6447 6448 return 0; 6449 } 6450 6451 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, 6452 const struct phylink_link_state *state) 6453 { 6454 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6455 6456 /* mac (re)configuration */ 6457 if (mvpp2_is_xlg(state->interface)) 6458 mvpp2_xlg_config(port, mode, state); 6459 else if (phy_interface_mode_is_rgmii(state->interface) || 6460 phy_interface_mode_is_8023z(state->interface) || 6461 state->interface == PHY_INTERFACE_MODE_SGMII) 6462 mvpp2_gmac_config(port, mode, state); 6463 6464 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 6465 mvpp2_port_loopback_set(port, state); 6466 } 6467 6468 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, 6469 phy_interface_t interface) 6470 { 6471 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6472 6473 if (port->priv->hw_version >= MVPP22 && 6474 port->phy_interface != interface) { 6475 port->phy_interface = interface; 6476 6477 /* Unmask interrupts */ 6478 mvpp22_gop_unmask_irq(port); 6479 } 6480 6481 if (!mvpp2_is_xlg(interface)) { 6482 /* Release GMAC reset and wait */ 6483 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6484 MVPP2_GMAC_PORT_RESET_MASK, 0); 6485 6486 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 6487 MVPP2_GMAC_PORT_RESET_MASK) 6488 continue; 6489 } 6490 6491 mvpp2_port_enable(port); 6492 6493 /* Allow the link to come up if in in-band mode, otherwise the 6494 * link is forced via mac_link_down()/mac_link_up() 6495 */ 6496 if (phylink_autoneg_inband(mode)) { 6497 if (mvpp2_is_xlg(interface)) 6498 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6499 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6500 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); 6501 else 6502 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6503 MVPP2_GMAC_FORCE_LINK_PASS | 6504 MVPP2_GMAC_FORCE_LINK_DOWN, 0); 6505 } 6506 6507 return 0; 6508 } 6509 6510 static void mvpp2_mac_link_up(struct phylink_config *config, 6511 struct phy_device *phy, 6512 unsigned int mode, phy_interface_t interface, 6513 int speed, int duplex, 6514 bool tx_pause, bool rx_pause) 6515 { 6516 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6517 u32 val; 6518 int i; 6519 6520 if (mvpp2_is_xlg(interface)) { 6521 if (!phylink_autoneg_inband(mode)) { 6522 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6523 if (tx_pause) 6524 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; 6525 if (rx_pause) 6526 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 6527 6528 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6529 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | 6530 
MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6531 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | 6532 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); 6533 } 6534 } else { 6535 if (!phylink_autoneg_inband(mode)) { 6536 val = MVPP2_GMAC_FORCE_LINK_PASS; 6537 6538 if (speed == SPEED_1000 || speed == SPEED_2500) 6539 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 6540 else if (speed == SPEED_100) 6541 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 6542 6543 if (duplex == DUPLEX_FULL) 6544 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6545 6546 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6547 MVPP2_GMAC_FORCE_LINK_DOWN | 6548 MVPP2_GMAC_FORCE_LINK_PASS | 6549 MVPP2_GMAC_CONFIG_MII_SPEED | 6550 MVPP2_GMAC_CONFIG_GMII_SPEED | 6551 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); 6552 } 6553 6554 /* We can always update the flow control enable bits; 6555 * these will only be effective if flow control AN 6556 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. 6557 */ 6558 val = 0; 6559 if (tx_pause) 6560 val |= MVPP22_CTRL4_TX_FC_EN; 6561 if (rx_pause) 6562 val |= MVPP22_CTRL4_RX_FC_EN; 6563 6564 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, 6565 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, 6566 val); 6567 } 6568 6569 if (port->priv->global_tx_fc) { 6570 port->tx_fc = tx_pause; 6571 if (tx_pause) 6572 mvpp2_rxq_enable_fc(port); 6573 else 6574 mvpp2_rxq_disable_fc(port); 6575 if (port->priv->percpu_pools) { 6576 for (i = 0; i < port->nrxqs; i++) 6577 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); 6578 } else { 6579 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); 6580 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); 6581 } 6582 if (port->priv->hw_version == MVPP23) 6583 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); 6584 } 6585 6586 mvpp2_port_enable(port); 6587 6588 mvpp2_egress_enable(port); 6589 mvpp2_ingress_enable(port); 6590 netif_tx_wake_all_queues(port->dev); 6591 } 6592 6593 static void mvpp2_mac_link_down(struct phylink_config *config, 6594 unsigned int mode, phy_interface_t interface) 6595 { 6596 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6597 u32 val; 6598 6599 if (!phylink_autoneg_inband(mode)) { 6600 if (mvpp2_is_xlg(interface)) { 6601 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6602 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6603 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 6604 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 6605 } else { 6606 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6607 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 6608 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 6609 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6610 } 6611 } 6612 6613 netif_tx_stop_all_queues(port->dev); 6614 mvpp2_egress_disable(port); 6615 mvpp2_ingress_disable(port); 6616 6617 mvpp2_port_disable(port); 6618 } 6619 6620 static const struct phylink_mac_ops mvpp2_phylink_ops = { 6621 .mac_select_pcs = mvpp2_select_pcs, 6622 .mac_prepare = mvpp2_mac_prepare, 6623 .mac_config = mvpp2_mac_config, 6624 .mac_finish = mvpp2_mac_finish, 6625 .mac_link_up = mvpp2_mac_link_up, 6626 .mac_link_down = mvpp2_mac_link_down, 6627 }; 6628 6629 /* Work-around for ACPI */ 6630 static void mvpp2_acpi_start(struct mvpp2_port *port) 6631 { 6632 /* Phylink isn't used as of now for ACPI, so the MAC has to be 6633 * configured manually when the interface is started. This will 6634 * be removed as soon as the phylink ACPI support lands in. 
6635 */ 6636 struct phylink_link_state state = { 6637 .interface = port->phy_interface, 6638 }; 6639 struct phylink_pcs *pcs; 6640 6641 pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); 6642 6643 mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, 6644 port->phy_interface); 6645 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); 6646 pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface, 6647 state.advertising, false); 6648 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, 6649 port->phy_interface); 6650 mvpp2_mac_link_up(&port->phylink_config, NULL, 6651 MLO_AN_INBAND, port->phy_interface, 6652 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); 6653 } 6654 6655 /* In order to ensure backward compatibility for ACPI, check if the port 6656 * firmware node comprises the necessary description allowing to use phylink. 6657 */ 6658 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) 6659 { 6660 if (!is_acpi_node(port_fwnode)) 6661 return false; 6662 6663 return (!fwnode_property_present(port_fwnode, "phy-handle") && 6664 !fwnode_property_present(port_fwnode, "managed") && 6665 !fwnode_get_named_child_node(port_fwnode, "fixed-link")); 6666 } 6667 6668 /* Ports initialization */ 6669 static int mvpp2_port_probe(struct platform_device *pdev, 6670 struct fwnode_handle *port_fwnode, 6671 struct mvpp2 *priv) 6672 { 6673 struct phy *comphy = NULL; 6674 struct mvpp2_port *port; 6675 struct mvpp2_port_pcpu *port_pcpu; 6676 struct device_node *port_node = to_of_node(port_fwnode); 6677 netdev_features_t features; 6678 struct net_device *dev; 6679 struct phylink *phylink; 6680 char *mac_from = ""; 6681 unsigned int ntxqs, nrxqs, thread; 6682 unsigned long flags = 0; 6683 bool has_tx_irqs; 6684 u32 id; 6685 int phy_mode; 6686 int err, i; 6687 6688 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); 6689 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { 6690 dev_err(&pdev->dev, 6691 "not enough IRQs to support multi queue mode\n"); 6692 return -EINVAL; 6693 } 6694 6695 ntxqs = MVPP2_MAX_TXQ; 6696 nrxqs = mvpp2_get_nrxqs(priv); 6697 6698 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); 6699 if (!dev) 6700 return -ENOMEM; 6701 6702 phy_mode = fwnode_get_phy_mode(port_fwnode); 6703 if (phy_mode < 0) { 6704 dev_err(&pdev->dev, "incorrect phy mode\n"); 6705 err = phy_mode; 6706 goto err_free_netdev; 6707 } 6708 6709 /* 6710 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. 6711 * Existing usage of 10GBASE-KR is not correct; no backplane 6712 * negotiation is done, and this driver does not actually support 6713 * 10GBASE-KR. 
6714 */ 6715 if (phy_mode == PHY_INTERFACE_MODE_10GKR) 6716 phy_mode = PHY_INTERFACE_MODE_10GBASER; 6717 6718 if (port_node) { 6719 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); 6720 if (IS_ERR(comphy)) { 6721 if (PTR_ERR(comphy) == -EPROBE_DEFER) { 6722 err = -EPROBE_DEFER; 6723 goto err_free_netdev; 6724 } 6725 comphy = NULL; 6726 } 6727 } 6728 6729 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { 6730 err = -EINVAL; 6731 dev_err(&pdev->dev, "missing port-id value\n"); 6732 goto err_free_netdev; 6733 } 6734 6735 dev->tx_queue_len = MVPP2_MAX_TXD_MAX; 6736 dev->watchdog_timeo = 5 * HZ; 6737 dev->netdev_ops = &mvpp2_netdev_ops; 6738 dev->ethtool_ops = &mvpp2_eth_tool_ops; 6739 6740 port = netdev_priv(dev); 6741 port->dev = dev; 6742 port->fwnode = port_fwnode; 6743 port->ntxqs = ntxqs; 6744 port->nrxqs = nrxqs; 6745 port->priv = priv; 6746 port->has_tx_irqs = has_tx_irqs; 6747 port->flags = flags; 6748 6749 err = mvpp2_queue_vectors_init(port, port_node); 6750 if (err) 6751 goto err_free_netdev; 6752 6753 if (port_node) 6754 port->port_irq = of_irq_get_byname(port_node, "link"); 6755 else 6756 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); 6757 if (port->port_irq == -EPROBE_DEFER) { 6758 err = -EPROBE_DEFER; 6759 goto err_deinit_qvecs; 6760 } 6761 if (port->port_irq <= 0) 6762 /* the link irq is optional */ 6763 port->port_irq = 0; 6764 6765 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) 6766 port->flags |= MVPP2_F_LOOPBACK; 6767 6768 port->id = id; 6769 if (priv->hw_version == MVPP21) 6770 port->first_rxq = port->id * port->nrxqs; 6771 else 6772 port->first_rxq = port->id * priv->max_port_rxqs; 6773 6774 port->of_node = port_node; 6775 port->phy_interface = phy_mode; 6776 port->comphy = comphy; 6777 6778 if (priv->hw_version == MVPP21) { 6779 port->base = devm_platform_ioremap_resource(pdev, 2 + id); 6780 if (IS_ERR(port->base)) { 6781 err = PTR_ERR(port->base); 6782 goto err_free_irq; 6783 } 6784 6785 port->stats_base = port->priv->lms_base + 6786 MVPP21_MIB_COUNTERS_OFFSET + 6787 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; 6788 } else { 6789 if (fwnode_property_read_u32(port_fwnode, "gop-port-id", 6790 &port->gop_id)) { 6791 err = -EINVAL; 6792 dev_err(&pdev->dev, "missing gop-port-id value\n"); 6793 goto err_deinit_qvecs; 6794 } 6795 6796 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); 6797 port->stats_base = port->priv->iface_base + 6798 MVPP22_MIB_COUNTERS_OFFSET + 6799 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; 6800 6801 /* We may want a property to describe whether we should use 6802 * MAC hardware timestamping. 
6803 */ 6804 if (priv->tai) 6805 port->hwtstamp = true; 6806 } 6807 6808 /* Alloc per-cpu and ethtool stats */ 6809 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); 6810 if (!port->stats) { 6811 err = -ENOMEM; 6812 goto err_free_irq; 6813 } 6814 6815 port->ethtool_stats = devm_kcalloc(&pdev->dev, 6816 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), 6817 sizeof(u64), GFP_KERNEL); 6818 if (!port->ethtool_stats) { 6819 err = -ENOMEM; 6820 goto err_free_stats; 6821 } 6822 6823 mutex_init(&port->gather_stats_lock); 6824 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); 6825 6826 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); 6827 6828 port->tx_ring_size = MVPP2_MAX_TXD_DFLT; 6829 port->rx_ring_size = MVPP2_MAX_RXD_DFLT; 6830 SET_NETDEV_DEV(dev, &pdev->dev); 6831 6832 err = mvpp2_port_init(port); 6833 if (err < 0) { 6834 dev_err(&pdev->dev, "failed to init port %d\n", id); 6835 goto err_free_stats; 6836 } 6837 6838 mvpp2_port_periodic_xon_disable(port); 6839 6840 mvpp2_mac_reset_assert(port); 6841 mvpp22_pcs_reset_assert(port); 6842 6843 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); 6844 if (!port->pcpu) { 6845 err = -ENOMEM; 6846 goto err_free_txq_pcpu; 6847 } 6848 6849 if (!port->has_tx_irqs) { 6850 for (thread = 0; thread < priv->nthreads; thread++) { 6851 port_pcpu = per_cpu_ptr(port->pcpu, thread); 6852 6853 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, 6854 HRTIMER_MODE_REL_PINNED_SOFT); 6855 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; 6856 port_pcpu->timer_scheduled = false; 6857 port_pcpu->dev = dev; 6858 } 6859 } 6860 6861 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6862 NETIF_F_TSO; 6863 dev->features = features | NETIF_F_RXCSUM; 6864 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | 6865 NETIF_F_HW_VLAN_CTAG_FILTER; 6866 6867 if (mvpp22_rss_is_supported(port)) { 6868 dev->hw_features |= NETIF_F_RXHASH; 6869 dev->features |= NETIF_F_NTUPLE; 6870 } 6871 6872 if (!port->priv->percpu_pools) 6873 mvpp2_set_hw_csum(port, port->pool_long->id); 6874 else if (port->ntxqs >= num_possible_cpus() * 2) 6875 dev->xdp_features = NETDEV_XDP_ACT_BASIC | 6876 NETDEV_XDP_ACT_REDIRECT | 6877 NETDEV_XDP_ACT_NDO_XMIT; 6878 6879 dev->vlan_features |= features; 6880 netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); 6881 6882 dev->priv_flags |= IFF_UNICAST_FLT; 6883 6884 /* MTU range: 68 - 9704 */ 6885 dev->min_mtu = ETH_MIN_MTU; 6886 /* 9704 == 9728 - 20 and rounding to 8 */ 6887 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 6888 dev->dev.of_node = port_node; 6889 6890 port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; 6891 port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; 6892 6893 if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { 6894 port->phylink_config.dev = &dev->dev; 6895 port->phylink_config.type = PHYLINK_NETDEV; 6896 port->phylink_config.mac_capabilities = 6897 MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; 6898 6899 if (port->priv->global_tx_fc) 6900 port->phylink_config.mac_capabilities |= 6901 MAC_SYM_PAUSE | MAC_ASYM_PAUSE; 6902 6903 if (mvpp2_port_supports_xlg(port)) { 6904 /* If a COMPHY is present, we can support any of 6905 * the serdes modes and switch between them. 
6906 */ 6907 if (comphy) { 6908 __set_bit(PHY_INTERFACE_MODE_5GBASER, 6909 port->phylink_config.supported_interfaces); 6910 __set_bit(PHY_INTERFACE_MODE_10GBASER, 6911 port->phylink_config.supported_interfaces); 6912 __set_bit(PHY_INTERFACE_MODE_XAUI, 6913 port->phylink_config.supported_interfaces); 6914 } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { 6915 __set_bit(PHY_INTERFACE_MODE_5GBASER, 6916 port->phylink_config.supported_interfaces); 6917 } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { 6918 __set_bit(PHY_INTERFACE_MODE_10GBASER, 6919 port->phylink_config.supported_interfaces); 6920 } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { 6921 __set_bit(PHY_INTERFACE_MODE_XAUI, 6922 port->phylink_config.supported_interfaces); 6923 } 6924 6925 if (comphy) 6926 port->phylink_config.mac_capabilities |= 6927 MAC_10000FD | MAC_5000FD; 6928 else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) 6929 port->phylink_config.mac_capabilities |= 6930 MAC_5000FD; 6931 else 6932 port->phylink_config.mac_capabilities |= 6933 MAC_10000FD; 6934 } 6935 6936 if (mvpp2_port_supports_rgmii(port)) 6937 phy_interface_set_rgmii(port->phylink_config.supported_interfaces); 6938 6939 if (comphy) { 6940 /* If a COMPHY is present, we can support any of the 6941 * serdes modes and switch between them. 6942 */ 6943 __set_bit(PHY_INTERFACE_MODE_SGMII, 6944 port->phylink_config.supported_interfaces); 6945 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 6946 port->phylink_config.supported_interfaces); 6947 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 6948 port->phylink_config.supported_interfaces); 6949 } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { 6950 /* No COMPHY, with only 2500BASE-X mode supported */ 6951 __set_bit(PHY_INTERFACE_MODE_2500BASEX, 6952 port->phylink_config.supported_interfaces); 6953 } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || 6954 phy_mode == PHY_INTERFACE_MODE_SGMII) { 6955 /* No COMPHY, we can switch between 1000BASE-X and SGMII 6956 */ 6957 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 6958 port->phylink_config.supported_interfaces); 6959 __set_bit(PHY_INTERFACE_MODE_SGMII, 6960 port->phylink_config.supported_interfaces); 6961 } 6962 6963 phylink = phylink_create(&port->phylink_config, port_fwnode, 6964 phy_mode, &mvpp2_phylink_ops); 6965 if (IS_ERR(phylink)) { 6966 err = PTR_ERR(phylink); 6967 goto err_free_port_pcpu; 6968 } 6969 port->phylink = phylink; 6970 } else { 6971 dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); 6972 port->phylink = NULL; 6973 } 6974 6975 /* Cycle the comphy to power it down, saving 270mW per port - 6976 * don't worry about an error powering it up. When the comphy 6977 * driver does this, we can remove this code. 
6978 */ 6979 if (port->comphy) { 6980 err = mvpp22_comphy_init(port, port->phy_interface); 6981 if (err == 0) 6982 phy_power_off(port->comphy); 6983 } 6984 6985 err = register_netdev(dev); 6986 if (err < 0) { 6987 dev_err(&pdev->dev, "failed to register netdev\n"); 6988 goto err_phylink; 6989 } 6990 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6991 6992 priv->port_list[priv->port_count++] = port; 6993 6994 return 0; 6995 6996 err_phylink: 6997 if (port->phylink) 6998 phylink_destroy(port->phylink); 6999 err_free_port_pcpu: 7000 free_percpu(port->pcpu); 7001 err_free_txq_pcpu: 7002 for (i = 0; i < port->ntxqs; i++) 7003 free_percpu(port->txqs[i]->pcpu); 7004 err_free_stats: 7005 free_percpu(port->stats); 7006 err_free_irq: 7007 if (port->port_irq) 7008 irq_dispose_mapping(port->port_irq); 7009 err_deinit_qvecs: 7010 mvpp2_queue_vectors_deinit(port); 7011 err_free_netdev: 7012 free_netdev(dev); 7013 return err; 7014 } 7015 7016 /* Ports removal routine */ 7017 static void mvpp2_port_remove(struct mvpp2_port *port) 7018 { 7019 int i; 7020 7021 unregister_netdev(port->dev); 7022 if (port->phylink) 7023 phylink_destroy(port->phylink); 7024 free_percpu(port->pcpu); 7025 free_percpu(port->stats); 7026 for (i = 0; i < port->ntxqs; i++) 7027 free_percpu(port->txqs[i]->pcpu); 7028 mvpp2_queue_vectors_deinit(port); 7029 if (port->port_irq) 7030 irq_dispose_mapping(port->port_irq); 7031 free_netdev(port->dev); 7032 } 7033 7034 /* Initialize decoding windows */ 7035 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 7036 struct mvpp2 *priv) 7037 { 7038 u32 win_enable; 7039 int i; 7040 7041 for (i = 0; i < 6; i++) { 7042 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 7043 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 7044 7045 if (i < 4) 7046 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 7047 } 7048 7049 win_enable = 0; 7050 7051 for (i = 0; i < dram->num_cs; i++) { 7052 const struct mbus_dram_window *cs = dram->cs + i; 7053 7054 mvpp2_write(priv, MVPP2_WIN_BASE(i), 7055 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 7056 dram->mbus_dram_target_id); 7057 7058 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 7059 (cs->size - 1) & 0xffff0000); 7060 7061 win_enable |= (1 << i); 7062 } 7063 7064 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 7065 } 7066 7067 /* Initialize Rx FIFO's */ 7068 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 7069 { 7070 int port; 7071 7072 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 7073 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 7074 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); 7075 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 7076 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); 7077 } 7078 7079 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 7080 MVPP2_RX_FIFO_PORT_MIN_PKT); 7081 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 7082 } 7083 7084 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) 7085 { 7086 int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size); 7087 7088 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size); 7089 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size); 7090 } 7091 7092 /* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3. 7093 * 4kB fixed space must be assigned for the loopback port. 7094 * Redistribute remaining available 44kB space among all active ports. 7095 * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G 7096 * SGMII link.
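* For example, worked from the loop below: with all three data ports active, port 0 gets max(44kB / 3, 32kB) = 32kB, port 1 then gets max(12kB / 2, 8kB) = 8kB, and port 2 receives the remaining 4kB.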
7097 */ 7098 static void mvpp22_rx_fifo_init(struct mvpp2 *priv) 7099 { 7100 int remaining_ports_count; 7101 unsigned long port_map; 7102 int size_remainder; 7103 int port, size; 7104 7105 /* The loopback requires fixed 4kB of the FIFO space assignment. */ 7106 mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, 7107 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); 7108 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); 7109 7110 /* Set RX FIFO size to 0 for inactive ports. */ 7111 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) 7112 mvpp22_rx_fifo_set_hw(priv, port, 0); 7113 7114 /* Assign remaining RX FIFO space among all active ports. */ 7115 size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB; 7116 remaining_ports_count = hweight_long(port_map); 7117 7118 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { 7119 if (remaining_ports_count == 1) 7120 size = size_remainder; 7121 else if (port == 0) 7122 size = max(size_remainder / remaining_ports_count, 7123 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); 7124 else if (port == 1) 7125 size = max(size_remainder / remaining_ports_count, 7126 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); 7127 else 7128 size = size_remainder / remaining_ports_count; 7129 7130 size_remainder -= size; 7131 remaining_ports_count--; 7132 7133 mvpp22_rx_fifo_set_hw(priv, port, size); 7134 } 7135 7136 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 7137 MVPP2_RX_FIFO_PORT_MIN_PKT); 7138 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 7139 } 7140 7141 /* Configure Rx FIFO Flow control thresholds */ 7142 static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv) 7143 { 7144 int port, val; 7145 7146 /* Port 0: maximum speed -10Gb/s port 7147 * required by spec RX FIFO threshold 9KB 7148 * Port 1: maximum speed -5Gb/s port 7149 * required by spec RX FIFO threshold 4KB 7150 * Port 2: maximum speed -1Gb/s port 7151 * required by spec RX FIFO threshold 2KB 7152 */ 7153 7154 /* Without loopback port */ 7155 for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) { 7156 if (port == 0) { 7157 val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) 7158 << MVPP2_RX_FC_TRSH_OFFS; 7159 val &= MVPP2_RX_FC_TRSH_MASK; 7160 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); 7161 } else if (port == 1) { 7162 val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) 7163 << MVPP2_RX_FC_TRSH_OFFS; 7164 val &= MVPP2_RX_FC_TRSH_MASK; 7165 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); 7166 } else { 7167 val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) 7168 << MVPP2_RX_FC_TRSH_OFFS; 7169 val &= MVPP2_RX_FC_TRSH_MASK; 7170 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); 7171 } 7172 } 7173 } 7174 7175 /* Enable/disable Rx FIFO Flow control */ 7176 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en) 7177 { 7178 int val; 7179 7180 val = mvpp2_read(priv, MVPP2_RX_FC_REG(port)); 7181 7182 if (en) 7183 val |= MVPP2_RX_FC_EN; 7184 else 7185 val &= ~MVPP2_RX_FC_EN; 7186 7187 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); 7188 } 7189 7190 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) 7191 { 7192 int threshold = MVPP2_TX_FIFO_THRESHOLD(size); 7193 7194 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); 7195 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold); 7196 } 7197 7198 /* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3. 7199 * 1kB fixed space must be assigned for the loopback port. 7200 * Redistribute remaining available 18kB space among all active ports.
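* (For example, with all three data ports active, port 0 takes the 10kB maximum noted below and ports 1 and 2 split the remaining 8kB into 4kB each.)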
7201 * The 10G interface should use 10kB (which is maximum possible size 7202 * per single port). 7203 */ 7204 static void mvpp22_tx_fifo_init(struct mvpp2 *priv) 7205 { 7206 int remaining_ports_count; 7207 unsigned long port_map; 7208 int size_remainder; 7209 int port, size; 7210 7211 /* The loopback requires fixed 1kB of the FIFO space assignment. */ 7212 mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, 7213 MVPP22_TX_FIFO_DATA_SIZE_1KB); 7214 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); 7215 7216 /* Set TX FIFO size to 0 for inactive ports. */ 7217 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) 7218 mvpp22_tx_fifo_set_hw(priv, port, 0); 7219 7220 /* Assign remaining TX FIFO space among all active ports. */ 7221 size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; 7222 remaining_ports_count = hweight_long(port_map); 7223 7224 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { 7225 if (remaining_ports_count == 1) 7226 size = min(size_remainder, 7227 MVPP22_TX_FIFO_DATA_SIZE_10KB); 7228 else if (port == 0) 7229 size = MVPP22_TX_FIFO_DATA_SIZE_10KB; 7230 else 7231 size = size_remainder / remaining_ports_count; 7232 7233 size_remainder -= size; 7234 remaining_ports_count--; 7235 7236 mvpp22_tx_fifo_set_hw(priv, port, size); 7237 } 7238 } 7239 7240 static void mvpp2_axi_init(struct mvpp2 *priv) 7241 { 7242 u32 val, rdval, wrval; 7243 7244 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 7245 7246 /* AXI Bridge Configuration */ 7247 7248 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 7249 << MVPP22_AXI_ATTR_CACHE_OFFS; 7250 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7251 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7252 7253 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 7254 << MVPP22_AXI_ATTR_CACHE_OFFS; 7255 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7256 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7257 7258 /* BM */ 7259 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 7260 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 7261 7262 /* Descriptors */ 7263 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 7264 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 7265 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 7266 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 7267 7268 /* Buffer Data */ 7269 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 7270 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 7271 7272 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 7273 << MVPP22_AXI_CODE_CACHE_OFFS; 7274 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 7275 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7276 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 7277 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 7278 7279 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 7280 << MVPP22_AXI_CODE_CACHE_OFFS; 7281 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7282 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7283 7284 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 7285 7286 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 7287 << MVPP22_AXI_CODE_CACHE_OFFS; 7288 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7289 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7290 7291 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 7292 } 7293 7294 /* Initialize network controller common part HW */ 7295 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) 7296 { 7297 const struct mbus_dram_target_info *dram_target_info; 7298 int err, i; 7299 u32 val; 7300 7301 /* MBUS windows configuration */ 7302 dram_target_info = mv_mbus_dram_info(); 7303 if (dram_target_info) 7304 
mvpp2_conf_mbus_windows(dram_target_info, priv); 7305 7306 if (priv->hw_version >= MVPP22) 7307 mvpp2_axi_init(priv); 7308 7309 /* Disable HW PHY polling */ 7310 if (priv->hw_version == MVPP21) { 7311 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 7312 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 7313 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 7314 } else { 7315 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 7316 val &= ~MVPP22_SMI_POLLING_EN; 7317 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 7318 } 7319 7320 /* Allocate and initialize aggregated TXQs */ 7321 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, 7322 sizeof(*priv->aggr_txqs), 7323 GFP_KERNEL); 7324 if (!priv->aggr_txqs) 7325 return -ENOMEM; 7326 7327 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 7328 priv->aggr_txqs[i].id = i; 7329 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 7330 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); 7331 if (err < 0) 7332 return err; 7333 } 7334 7335 /* FIFO init */ 7336 if (priv->hw_version == MVPP21) { 7337 mvpp2_rx_fifo_init(priv); 7338 } else { 7339 mvpp22_rx_fifo_init(priv); 7340 mvpp22_tx_fifo_init(priv); 7341 if (priv->hw_version == MVPP23) 7342 mvpp23_rx_fifo_fc_set_tresh(priv); 7343 } 7344 7345 if (priv->hw_version == MVPP21) 7346 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 7347 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 7348 7349 /* Allow cache snoop when transmitting packets */ 7350 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 7351 7352 /* Buffer Manager initialization */ 7353 err = mvpp2_bm_init(&pdev->dev, priv); 7354 if (err < 0) 7355 return err; 7356 7357 /* Parser default initialization */ 7358 err = mvpp2_prs_default_init(pdev, priv); 7359 if (err < 0) 7360 return err; 7361 7362 /* Classifier default initialization */ 7363 mvpp2_cls_init(priv); 7364 7365 return 0; 7366 } 7367 7368 static int mvpp2_get_sram(struct platform_device *pdev, 7369 struct mvpp2 *priv) 7370 { 7371 struct resource *res; 7372 void __iomem *base; 7373 7374 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 7375 if (!res) { 7376 if (has_acpi_companion(&pdev->dev)) 7377 dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n"); 7378 else 7379 dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n"); 7380 return 0; 7381 } 7382 7383 base = devm_ioremap_resource(&pdev->dev, res); 7384 if (IS_ERR(base)) 7385 return PTR_ERR(base); 7386 7387 priv->cm3_base = base; 7388 return 0; 7389 } 7390 7391 static int mvpp2_probe(struct platform_device *pdev) 7392 { 7393 struct fwnode_handle *fwnode = pdev->dev.fwnode; 7394 struct fwnode_handle *port_fwnode; 7395 struct mvpp2 *priv; 7396 struct resource *res; 7397 void __iomem *base; 7398 int i, shared; 7399 int err; 7400 7401 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 7402 if (!priv) 7403 return -ENOMEM; 7404 7405 priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev); 7406 7407 /* multi queue mode isn't supported on PPv2.1; fall back to single 7408 * mode 7409 */ 7410 if (priv->hw_version == MVPP21) 7411 queue_mode = MVPP2_QDIST_SINGLE_MODE; 7412 7413 base = devm_platform_ioremap_resource(pdev, 0); 7414 if (IS_ERR(base)) 7415 return PTR_ERR(base); 7416 7417 if (priv->hw_version == MVPP21) { 7418 priv->lms_base = devm_platform_ioremap_resource(pdev, 1); 7419 if (IS_ERR(priv->lms_base)) 7420 return PTR_ERR(priv->lms_base); 7421 } else { 7422 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 7423 if (!res) { 7424 dev_err(&pdev->dev, "Invalid resource\n"); 7425 return
-EINVAL; 7426 } 7427 if (has_acpi_companion(&pdev->dev)) { 7428 /* If the MDIO memory region is declared in 7429 * ACPI, it can already appear as 'in-use' 7430 * in the OS. Because it is overlapped by the 7431 * second region of the network controller, make 7432 * sure it is released before requesting it again. 7433 * The mvpp2 driver takes care to avoid 7434 * concurrent access to this memory region. 7435 */ 7436 release_resource(res); 7437 } 7438 priv->iface_base = devm_ioremap_resource(&pdev->dev, res); 7439 if (IS_ERR(priv->iface_base)) 7440 return PTR_ERR(priv->iface_base); 7441 7442 /* Map CM3 SRAM */ 7443 err = mvpp2_get_sram(pdev, priv); 7444 if (err) 7445 dev_warn(&pdev->dev, "Failed to allocate CM3 SRAM\n"); 7446 7447 /* Enable global Flow Control only if the handle to the CM3 SRAM is not NULL */ 7448 if (priv->cm3_base) 7449 priv->global_tx_fc = true; 7450 } 7451 7452 if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) { 7453 priv->sysctrl_base = 7454 syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 7455 "marvell,system-controller"); 7456 if (IS_ERR(priv->sysctrl_base)) 7457 /* The system controller regmap is optional for dt 7458 * compatibility reasons. When not provided, the 7459 * configuration of the GoP relies on the 7460 * firmware/bootloader. 7461 */ 7462 priv->sysctrl_base = NULL; 7463 } 7464 7465 if (priv->hw_version >= MVPP22 && 7466 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) 7467 priv->percpu_pools = 1; 7468 7469 mvpp2_setup_bm_pool(); 7470 7471 7472 priv->nthreads = min_t(unsigned int, num_present_cpus(), 7473 MVPP2_MAX_THREADS); 7474 7475 shared = num_present_cpus() - priv->nthreads; 7476 if (shared > 0) 7477 bitmap_set(&priv->lock_map, 0, 7478 min_t(int, shared, MVPP2_MAX_THREADS)); 7479 7480 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 7481 u32 addr_space_sz; 7482 7483 addr_space_sz = (priv->hw_version == MVPP21 ?
7484 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); 7485 priv->swth_base[i] = base + i * addr_space_sz; 7486 } 7487 7488 if (priv->hw_version == MVPP21) 7489 priv->max_port_rxqs = 8; 7490 else 7491 priv->max_port_rxqs = 32; 7492 7493 if (dev_of_node(&pdev->dev)) { 7494 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); 7495 if (IS_ERR(priv->pp_clk)) 7496 return PTR_ERR(priv->pp_clk); 7497 err = clk_prepare_enable(priv->pp_clk); 7498 if (err < 0) 7499 return err; 7500 7501 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); 7502 if (IS_ERR(priv->gop_clk)) { 7503 err = PTR_ERR(priv->gop_clk); 7504 goto err_pp_clk; 7505 } 7506 err = clk_prepare_enable(priv->gop_clk); 7507 if (err < 0) 7508 goto err_pp_clk; 7509 7510 if (priv->hw_version >= MVPP22) { 7511 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); 7512 if (IS_ERR(priv->mg_clk)) { 7513 err = PTR_ERR(priv->mg_clk); 7514 goto err_gop_clk; 7515 } 7516 7517 err = clk_prepare_enable(priv->mg_clk); 7518 if (err < 0) 7519 goto err_gop_clk; 7520 7521 priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk"); 7522 if (IS_ERR(priv->mg_core_clk)) { 7523 err = PTR_ERR(priv->mg_core_clk); 7524 goto err_mg_clk; 7525 } 7526 7527 err = clk_prepare_enable(priv->mg_core_clk); 7528 if (err < 0) 7529 goto err_mg_clk; 7530 } 7531 7532 priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk"); 7533 if (IS_ERR(priv->axi_clk)) { 7534 err = PTR_ERR(priv->axi_clk); 7535 goto err_mg_core_clk; 7536 } 7537 7538 err = clk_prepare_enable(priv->axi_clk); 7539 if (err < 0) 7540 goto err_mg_core_clk; 7541 7542 /* Get system's tclk rate */ 7543 priv->tclk = clk_get_rate(priv->pp_clk); 7544 } else { 7545 err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk); 7546 if (err) { 7547 dev_err(&pdev->dev, "missing clock-frequency value\n"); 7548 return err; 7549 } 7550 } 7551 7552 if (priv->hw_version >= MVPP22) { 7553 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); 7554 if (err) 7555 goto err_axi_clk; 7556 /* Sadly, the BM pools all share the same register to 7557 * store the high 32 bits of their address. So they 7558 * must all have the same high 32 bits, which forces 7559 * us to restrict coherent memory to DMA_BIT_MASK(32). 7560 */ 7561 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 7562 if (err) 7563 goto err_axi_clk; 7564 } 7565 7566 /* Map DTS-active ports. Should be done before FIFO mvpp2_init */ 7567 fwnode_for_each_available_child_node(fwnode, port_fwnode) { 7568 if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) 7569 priv->port_map |= BIT(i); 7570 } 7571 7572 if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) 7573 priv->hw_version = MVPP23; 7574 7575 /* Init mss lock */ 7576 spin_lock_init(&priv->mss_spinlock); 7577 7578 /* Initialize network controller */ 7579 err = mvpp2_init(pdev, priv); 7580 if (err < 0) { 7581 dev_err(&pdev->dev, "failed to initialize controller\n"); 7582 goto err_axi_clk; 7583 } 7584 7585 err = mvpp22_tai_probe(&pdev->dev, priv); 7586 if (err < 0) 7587 goto err_axi_clk; 7588 7589 /* Initialize ports */ 7590 fwnode_for_each_available_child_node(fwnode, port_fwnode) { 7591 err = mvpp2_port_probe(pdev, port_fwnode, priv); 7592 if (err < 0) 7593 goto err_port_probe; 7594 } 7595 7596 if (priv->port_count == 0) { 7597 dev_err(&pdev->dev, "no ports enabled\n"); 7598 err = -ENODEV; 7599 goto err_axi_clk; 7600 } 7601 7602 /* Statistics must be gathered regularly because some of them (like 7603 * packets counters) are 32-bit registers and could overflow quite 7604 * quickly. 
* quickly.
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
		err = mvpp2_enable_global_fc(priv);
		if (err)
			dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	fwnode_handle_put(port_fwnode);

	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
	clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
	struct fwnode_handle *port_fwnode;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
#endif
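/* Note: the .data / .driver_data values in the match tables above encode
 * the hardware version (MVPP21/MVPP22). The probe path is assumed to read
 * this match data to seed priv->hw_version, which the MVPP2_VER_ID_REG
 * check in mvpp2_probe() may later promote to MVPP23.
 */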
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

static int __init mvpp2_driver_init(void)
{
	return platform_driver_register(&mvpp2_driver);
}
module_init(mvpp2_driver_init);

static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
	mvpp2_dbgfs_exit();
}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");